filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
examples/rates/current_rate/current_rate.go | package main
import (
"log"
"os"
"github.com/tonicpow/go-tonicpow"
)
func main() {
// Load the api client
client, err := tonicpow.NewClient(
tonicpow.WithAPIKey(os.Getenv("TONICPOW_API_KEY")),
tonicpow.WithEnvironmentString(os.Getenv("TONICPOW_ENVIRONMENT")),
)
if err != nil {
log.Fatalf("error in NewClient: %s", err.Error())
}
// Get current rate
var rate *tonicpow.Rate
rate, _, err = client.GetCurrentRate("usd", 1.00)
if err != nil {
log.Fatalf("error in GetCurrentRate: %s", err.Error())
}
log.Printf("rate: %s %f is %d sats", rate.Currency, rate.CurrencyAmount, rate.PriceInSatoshis)
}
| ["\"TONICPOW_API_KEY\"", "\"TONICPOW_ENVIRONMENT\""] | [] | ["TONICPOW_API_KEY", "TONICPOW_ENVIRONMENT"] | [] | ["TONICPOW_API_KEY", "TONICPOW_ENVIRONMENT"] | go | 2 | 0 | |
auth/main.go | package main
import (
"os"
"fmt"
"encoding/json"
"github.com/streadway/amqp"
"gopkg.in/guregu/null.v3"
"golang.org/x/crypto/bcrypt"
"github.com/juju/ansiterm"
"github.com/juju/loggo"
"github.com/juju/loggo/loggocolor"
)
var log = loggo.GetLogger("")
var VERSION string
var COMMITHASH string
func failOnError(err error, msg string) {
if err != nil {
log.Criticalf("%s: %s", msg, err)
panic(fmt.Sprintf("%s: %s", msg, err))
}
}
type PasswordParam struct {
Password string `json:"password"`
Hash null.String `json:"hash"`
}
type HashPasswordResult struct {
Hash string `json:"hash"`
}
type VerifyPasswordResult struct {
Valid int `json:"valid"`
}
type JsonRpcError struct {
Code int `json:"code"`
Message string `json:"message"`
Data null.String `json:"data"`
}
type JsonRpcRequest struct {
JsonRpc string `json:"jsonrpc"`
Method string `json:"method"`
Param PasswordParam `json:"param"`
Id null.Int `json:"id"`
}
type JsonRpcParam interface { }
type JsonRpcResult interface { }
type JsonRpcResponse interface { }
type JsonRpcRes struct {
JsonRpc string `json:"jsonrpc"`
Result JsonRpcResult `json:"result"`
Id null.Int `json:"id"`
}
type JsonRpcErr struct {
JsonRpc string `json:"jsonrpc"`
Error JsonRpcError `json:"error"`
Id null.Int `json:"id"`
}
func ExecuteHash(param PasswordParam) (HashPasswordResult, *JsonRpcError) {
log.Tracef("[Auth] ExecuteHash")
bytes, err := bcrypt.GenerateFromPassword([]byte(param.Password), 14)
var a HashPasswordResult
var e *JsonRpcError
if err == nil {
log.Infof("[Auth] Hash generated")
a = HashPasswordResult{
Hash: string(bytes),
}
} else {
log.Errorf("[Auth] Error during hash: %s", err)
e = &JsonRpcError{
Code: -32603,
Message: "Internal error",
Data: null.StringFrom(err.Error()),
}
}
return a, e
}
func ExecuteVerify(param PasswordParam) (VerifyPasswordResult, *JsonRpcError) {
log.Tracef("[Auth] ExecuteVerify")
hash := param.Hash.ValueOrZero()
err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(param.Password))
var a VerifyPasswordResult
var e *JsonRpcError
if err == nil {
log.Infof("[Auth] Password verified")
a = VerifyPasswordResult{
Valid: 1,
}
} else if err == bcrypt.ErrMismatchedHashAndPassword {
log.Infof("[Auth] Password wrong")
a = VerifyPasswordResult{
Valid: 0,
}
} else {
log.Errorf("[Auth] Error during verify: %s", err)
e = &JsonRpcError{
Code: -32603,
Message: "Internal error",
Data: null.StringFrom(err.Error()),
}
}
return a, e
}
func main() {
loggocolor.SeverityColor = map[loggo.Level]*ansiterm.Context{
loggo.TRACE: ansiterm.Foreground(ansiterm.Cyan),
loggo.DEBUG: ansiterm.Foreground(ansiterm.Cyan),
loggo.INFO: ansiterm.Foreground(ansiterm.Green),
loggo.WARNING: ansiterm.Foreground(ansiterm.BrightYellow),
loggo.ERROR: ansiterm.Foreground(ansiterm.BrightRed),
loggo.CRITICAL: &ansiterm.Context{
Foreground: ansiterm.White,
Background: ansiterm.Red,
},
}
loggocolor.LocationColor = ansiterm.Foreground(ansiterm.Default)
loggo.RemoveWriter("default")
loggo.RegisterWriter("default", loggocolor.NewWriter(os.Stdout))
verbosity := os.Args[2]
switch verbosity {
case "fatal":
log.SetLogLevel(loggo.CRITICAL)
case "error":
log.SetLogLevel(loggo.ERROR)
case "warn":
log.SetLogLevel(loggo.WARNING)
case "info":
log.SetLogLevel(loggo.INFO)
case "debug":
log.SetLogLevel(loggo.DEBUG)
case "trace":
log.SetLogLevel(loggo.TRACE)
default:
log.SetLogLevel(loggo.INFO)
}
log.Infof("[Main] Verbosity: %s", verbosity)
log.Infof("[Main] VERSION: %s", VERSION)
log.Infof("[Main] COMMITHASH: %s", COMMITHASH)
channelName := os.Args[1]
log.Infof("[Main] Channel name: %s", channelName)
rabbitUser := os.Getenv("RABBIT_USER")
if rabbitUser == "" {
rabbitUser = "guest"
}
rabbitPass := os.Getenv("RABBIT_PASS")
if rabbitPass == "" {
rabbitPass = "guest"
}
rabbitHost := os.Getenv("RABBIT_HOST")
if rabbitHost == "" {
rabbitHost = "localhost"
}
log.Infof("[Main] Rabbit host: %s", rabbitHost)
log.Infof("[Main] Rabbit user: %s", rabbitUser)
rabbit := fmt.Sprintf("amqp://%s:%s@%s:5672/", rabbitUser, rabbitPass, rabbitHost)
log.Debugf("[Main] amqp.Dial")
log.Tracef("[Main] url: %s", rabbit)
conn, err := amqp.Dial(rabbit)
failOnError(err, "Failed to connect to RabbitMQ")
defer conn.Close()
log.Debugf("[Main] conn.Channel")
ch, err := conn.Channel()
failOnError(err, "Failed to open a channel")
defer ch.Close()
log.Debugf("[Main] ch.QueueDeclare")
q, err := ch.QueueDeclare(
channelName, // name
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare a queue")
log.Debugf("[Main] ch.Qos")
err = ch.Qos(
1, // prefetch count
0, // prefetch size
false, // global
)
failOnError(err, "Failed to set QoS")
log.Debugf("[Main] ch.Consume")
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
forever := make(chan bool)
log.Tracef("[Main] go func")
go func() {
log.Infof("[Main] Listening auth")
for d := range msgs {
log.Infof("[Rpc] Message from %s", d.ReplyTo)
if d.ReplyTo == "" {
log.Warningf("[Rpc] Rejecting")
d.Reject(false)
continue
}
var m JsonRpcRequest
var res JsonRpcResponse
err = json.Unmarshal(d.Body, &m)
if err != nil {
log.Warningf("[Rpc] Json parse error: %s", err)
res = JsonRpcErr{
JsonRpc: "2.0",
Error: JsonRpcError{
Code: -32700,
Message: "Parse error",
},
Id: null.Int{},
}
} else if m.JsonRpc != "2.0" {
log.Warningf("[Rpc] Json rpc version incorrect")
res = JsonRpcErr{
JsonRpc: "2.0",
Error: JsonRpcError{
Code: -32600,
Message: "Invalid Request",
},
Id: m.Id,
}
} else if m.Method == "hashPassword" {
log.Debugf("[Rpc] Method called: %s", m.Method)
r, err := ExecuteHash(m.Param)
if err == nil {
res = JsonRpcRes{
JsonRpc: "2.0",
Result: r,
Id: m.Id,
}
} else {
res = JsonRpcErr{
JsonRpc: "2.0",
Error: *err,
Id: m.Id,
}
}
} else if m.Method == "verifyPassword" {
log.Debugf("[Rpc] Method called: %s", m.Method)
r, err := ExecuteVerify(m.Param)
if err == nil {
res = JsonRpcRes{
JsonRpc: "2.0",
Result: r,
Id: m.Id,
}
} else {
res = JsonRpcErr{
JsonRpc: "2.0",
Error: *err,
Id: m.Id,
}
}
} else {
log.Errorf("[Rpc] Method not found: %s", m.Method)
res = JsonRpcErr{
JsonRpc: "2.0",
Error: JsonRpcError{
Code: -32601,
Message: "Method not found",
},
Id: m.Id,
}
}
str, err := json.Marshal(res)
if err != nil {
log.Errorf("[Rpc] Error during marshal: %s", err)
str = []byte(`{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error"},"id":null}`)
}
log.Debugf("[Rpc] Response: %s", str)
err = ch.Publish(
"", // exchange
d.ReplyTo, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
Body: str,
},
)
if err != nil {
log.Errorf("[Rpc] Error during reply: %s", err)
}
d.Ack(false)
}
}()
<-forever
}
| ["\"RABBIT_USER\"", "\"RABBIT_PASS\"", "\"RABBIT_HOST\""] | [] | ["RABBIT_HOST", "RABBIT_PASS", "RABBIT_USER"] | [] | ["RABBIT_HOST", "RABBIT_PASS", "RABBIT_USER"] | go | 3 | 0 | |
worldbuilders/vulns/vulns.go | package inventory
import (
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"cloud.google.com/go/firestore"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
)
type routerMessage struct {
Bucket string `json:"bucket"`
Name string `json:"name"`
EventType string `json:"event-type"`
MimosaType string `json:"mimosa-type"`
MimosaTypeVersion string `json:"mimosa-type-version"`
Workspace string `json:"workspace"`
}
type vulnerabilityDetails struct {
CvssV3Score string `json:"cvss_V3_score"`
Qid string `json:"qid"`
PatchAvailable bool `json:"patch_available"`
CvssTemporalScore string `json:"cvss_temporal_score"`
CvssV3TemporalScore string `json:"cvss_V3_temporal_score"`
Cves []string `json:"cves"`
Exploitable bool `json:"exploitable"`
Severity int `json:"severity"`
Title string `json:"title"`
Solution string `json:"solution"`
Summary string `json:"summary"`
CvssModifier float64 `json:"cvss_modifier"`
CvssScore string `json:"cvss_score"`
}
type vulnerability struct {
ID string `firestore:"id"`
Name string `firestore:"name"`
Score string `firestore:"score"`
Count int `firestore:"count"`
Hosts map[string]*affectedHost `firestore:"hosts"`
}
type affectedHost struct {
ID string `firestore:"id"`
Name string `firestore:"name"`
Hostname string `firestore:"hostname"`
}
// HandleMessage to find vulnerabilities
func HandleMessage(ctx context.Context, m *pubsub.Message) error {
// Unmarshal the message
var routerMessage routerMessage
err := json.Unmarshal(m.Data, &routerMessage)
if err != nil {
return fmt.Errorf("failed to unmarshal router message: %v", err)
}
log.Printf("router message: %v", routerMessage)
// FIXME Check version is supported
if routerMessage.MimosaTypeVersion == "" {
return fmt.Errorf("no version found in the router message: %v", err)
}
// Read object from the bucket
client, err := storage.NewClient(ctx)
if err != nil {
return err
}
obj := client.Bucket(routerMessage.Bucket).Object(routerMessage.Name)
rc, err := obj.NewReader(ctx)
if err != nil {
return err
}
defer rc.Close()
object, err := ioutil.ReadAll(rc)
if err != nil {
return err
}
// Get vulns from the object
host, vulns, err := convert(object)
if err != nil {
return err
}
// Calculate the host ID deterministically
hostID, err := generateDeterministicID(routerMessage.Bucket, routerMessage.Name)
if err != nil {
return err
}
// Firestore client
project := os.Getenv("MIMOSA_GCP_PROJECT")
if len(project) == 0 {
project = firestore.DetectProjectID
}
fc, err := firestore.NewClient(ctx, project)
if err != nil {
return err
}
// Qualys details are stored in this separate bucket
qualysBucket := os.Getenv("GCP_PROJECT") + "-qualys"
// Update each vuln to add this host
for vulnID := range vulns {
// FIXME this whole thing should be transactional
// Find the vuln doc
var doc *firestore.DocumentSnapshot
var ref *firestore.DocumentRef
var vulnerability vulnerability
iter := fc.Collection("ws").Doc(routerMessage.Workspace).Collection("vulns").Where("id", "==", vulnID).Limit(1).Documents(ctx)
doc, err = iter.Next()
if err == iterator.Done {
// Document doesn't exist
ref = fc.Collection("ws").Doc(routerMessage.Workspace).Collection("vulns").NewDoc()
vulnerability.ID = vulnID
vulnerabilityDetails, err := getVulnerabilityDetails(ctx, qualysBucket, vulnID)
if err != nil {
log.Printf("failed to load vulnerability details for vulnerability %s: %v", vulnID, err)
vulnerability.Name = "Unknown Vulnerability " + vulnID
} else {
vulnerability.Name = vulnerabilityDetails.Title
vulnerability.Score = vulnerabilityDetails.CvssScore
}
} else if err != nil {
// This is a real error
return err
} else {
ref = doc.Ref
err = doc.DataTo(&vulnerability)
if err != nil {
return err
}
}
// Add this host to the vuln and write back to Firestore if it is not already present
if vulnerability.Hosts == nil {
vulnerability.Hosts = map[string]*affectedHost{}
}
if vulnerability.Hosts[hostID] == nil {
vulnerability.Hosts[hostID] = host
vulnerability.Count = len(vulnerability.Hosts)
_, err = ref.Set(ctx, &vulnerability)
if err != nil {
log.Printf("error: failed to updated vuln document %s: %v", vulnID, err)
}
}
}
return err
}
func generateDeterministicID(bucketName, objectName string) (string, error) {
// Compute a deterministic hash to use as firestore ID
sha := sha1.New()
_, err := sha.Write([]byte(bucketName))
if err != nil {
return "", err
}
_, err = sha.Write([]byte(objectName))
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(sha.Sum(nil)), nil
}
func getVulnerabilityDetails(ctx context.Context, qualysBucket, vulnID string) (*vulnerabilityDetails, error) {
client, err := storage.NewClient(ctx)
if err != nil {
return nil, err
}
obj := client.Bucket(qualysBucket).Object(vulnID + ".json")
rc, err := obj.NewReader(ctx)
if err != nil {
return nil, err
}
defer rc.Close()
object, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
var vulnerabilityDetails vulnerabilityDetails
err = json.Unmarshal(object, &vulnerabilityDetails)
if err != nil {
return nil, err
}
return &vulnerabilityDetails, nil
}
| ["\"MIMOSA_GCP_PROJECT\"", "\"GCP_PROJECT\""] | [] | ["GCP_PROJECT", "MIMOSA_GCP_PROJECT"] | [] | ["GCP_PROJECT", "MIMOSA_GCP_PROJECT"] | go | 2 | 0 | |
libs/common.py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# apk add py-mysqldb or
import platform
import datetime
import time
import sys
import os
import MySQLdb
from sqlalchemy import create_engine
from sqlalchemy.types import NVARCHAR
from sqlalchemy import inspect
import tushare as ts
import pandas as pd
import traceback
# Use environment variables to configure the database. Works in both development mode and docker mode.
MYSQL_HOST = os.environ.get('MYSQL_HOST') if (os.environ.get('MYSQL_HOST') != None) else "mariadb"
MYSQL_USER = os.environ.get('MYSQL_USER') if (os.environ.get('MYSQL_USER') != None) else "root"
MYSQL_PWD = os.environ.get('MYSQL_PWD') if (os.environ.get('MYSQL_PWD') != None) else "mariadb"
MYSQL_DB = os.environ.get('MYSQL_DB') if (os.environ.get('MYSQL_DB') != None) else "stock_data"
print("MYSQL_HOST :", MYSQL_HOST, ",MYSQL_USER :", MYSQL_USER, ",MYSQL_DB :", MYSQL_DB)
MYSQL_CONN_URL = "mysql+mysqldb://" + MYSQL_USER + ":" + MYSQL_PWD + "@" + MYSQL_HOST + "/" + MYSQL_DB + "?charset=utf8"
print("MYSQL_CONN_URL :", MYSQL_CONN_URL)
def engine():
engine = create_engine(
MYSQL_CONN_URL,
encoding='utf8', convert_unicode=True)
return engine
def conn():
db = MySQLdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PWD, MYSQL_DB, charset="utf8")
# db.autocommit(on=True)
return db
# Generic helper: insert a DataFrame into a database table and create a primary key so the index stays unique when data is re-run.
def insert_db(data, table_name, write_index, primary_keys):
# Create the engine
engine_mysql = engine()
# See http://docs.sqlalchemy.org/en/latest/core/reflection.html
# Use the inspector to check whether the table already has a primary key.
insp = inspect(engine_mysql)
col_name_list = data.columns.tolist()
# If the index is written, add the index column to the varchar columns as well.
if write_index:
# Insert it at the first position:
col_name_list.insert(0, data.index.name)
print(col_name_list)
data.to_sql(name=table_name, con=engine_mysql, schema=MYSQL_DB, if_exists='append',
dtype={col_name: NVARCHAR(length=255) for col_name in col_name_list}, index=write_index)
# Check whether a primary key already exists
if insp.get_primary_keys(table_name) == []:
with engine_mysql.connect() as con:
# Execute the statement against the database.
try:
con.execute('ALTER TABLE `%s` ADD PRIMARY KEY (%s);' % (table_name, primary_keys))
except Exception as e:
print("################## ADD PRIMARY KEY ERROR :", e)
# Insert data.
def insert(sql, params=()):
with conn() as db:
print("insert sql:" + sql)
try:
db.execute(sql, params)
except Exception as e:
print("error :", e)
# Query data
def select(sql, params=()):
with conn() as db:
print("select sql:" + sql)
try:
db.execute(sql, params)
except Exception as e:
print("error :", e)
result = db.fetchall()
return result
# Count rows
def select_count(sql, params=()):
with conn() as db:
print("select sql:" + sql)
try:
db.execute(sql, params)
except Exception as e:
print("error :", e)
result = db.fetchall()
# Only use the first value when there is exactly one result row
if len(result) == 1:
return int(result[0][0])
else:
return 0
# Generic function. Gets the date arguments.
def run_with_args(run_fun):
tmp_datetime_show = datetime.datetime.now() # Defaults to running for the current day + datetime.timedelta()
tmp_datetime_str = tmp_datetime_show.strftime("%Y-%m-%d %H:%M:%S.%f")
str_db = "MYSQL_HOST :" + MYSQL_HOST + ", MYSQL_USER :" + MYSQL_USER + ", MYSQL_DB :" + MYSQL_DB
print("\n######################### " + str_db + " ######################### ")
print("\n######################### begin run %s %s #########################" % (run_fun, tmp_datetime_str))
start = time.time()
# To support re-running data, pass in a date and a loop count.
if len(sys.argv) == 3:
# python xxx.py 2017-07-01 10
tmp_year, tmp_month, tmp_day = sys.argv[1].split("-")
loop = int(sys.argv[2])
tmp_datetime = datetime.datetime(int(tmp_year), int(tmp_month), int(tmp_day))
for i in range(0, loop):
# Insert data multiple times in a loop, used for re-running historical data.
# time.sleep(5)
tmp_datetime_new = tmp_datetime + datetime.timedelta(days=i)
try:
run_fun(tmp_datetime_new)
except Exception as e:
print("error :", e)
traceback.print_exc()
elif len(sys.argv) == 2:
# python xxx.py 2017-07-01
tmp_year, tmp_month, tmp_day = sys.argv[1].split("-")
tmp_datetime = datetime.datetime(int(tmp_year), int(tmp_month), int(tmp_day))
try:
run_fun(tmp_datetime)
except Exception as e:
print("error :", e)
traceback.print_exc()
else:
# tmp_datetime = datetime.datetime.now() + datetime.timedelta(days=-1)
try:
run_fun(tmp_datetime_show) # Use the current time
except Exception as e:
print("error :", e)
traceback.print_exc()
print("######################### finish %s , use time: %s #########################" % (
tmp_datetime_str, time.time() - start))
# Set the base cache directory, used on every load.
bash_stock_tmp = "/data/cache/hist_data_cache/%s/%s/"
if not os.path.exists(bash_stock_tmp):
os.makedirs(bash_stock_tmp) # Create the nested folder structure.
print("######################### init tmp dir #########################")
# Helper that reads stock data from a cache to speed up processing.
def get_hist_data_cache(code, date_start, date_end):
cache_dir = bash_stock_tmp % (date_end[0:7], date_end)
# Create the folder if it does not exist. Month and day folders make cleanup easy.
# print("cache_dir:", cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = cache_dir + "%s^%s.gzip.pickle" % (date_end, code)
# If the cache exists, return the cached (gzip-compressed) data directly.
if os.path.isfile(cache_file):
print("######### read from cache #########", cache_file)
return pd.read_pickle(cache_file, compression="gzip")
else:
print("######### get data, write cache #########", code, date_start, date_end)
stock = ts.get_hist_data(code, start=date_start, end=date_end)
if stock is None:
return None
stock = stock.sort_index(0) # Sort the data by date.
stock.to_pickle(cache_file, compression="gzip")
return stock
| [] | [] | ["MYSQL_USER", "MYSQL_PWD", "MYSQL_DB", "MYSQL_HOST"] | [] | ["MYSQL_USER", "MYSQL_PWD", "MYSQL_DB", "MYSQL_HOST"] | python | 4 | 0 | |
test/functional/test_framework/test_node.py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for namecoind node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import subprocess
import time
from .util import (
assert_equal,
base_node_args,
get_rpc_proxy,
rpc_url,
wait_until,
)
from .authproxy import JSONRPCException
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a namecoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
To make things easier for the test writer, a bit of magic is happening under the covers.
Any unrecognised messages will be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("BITCOIND", "namecoind")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "bitcoin-cli"), self.datadir)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
def __getattr__(self, *args, **kwargs):
"""Dispatches any unrecognised messages to the RPC connection."""
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return self.rpc.__getattr__(*args, **kwargs)
def start(self, extra_args=None, stderr=None):
"""Start the node."""
base_args = base_node_args(self.index)
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
self.process = subprocess.Popen(self.args + extra_args + base_args, stderr=stderr)
self.running = True
self.log.debug("namecoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "namecoind exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to namecoind")
def get_wallet_rpc(self, wallet_name):
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes namecoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.args = []
self.binary = binary
self.datadir = datadir
self.input = None
def __call__(self, *args, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line args
self.args = [str(arg) for arg in args]
self.input = input
return self
def __getattr__(self, command):
def dispatcher(*args, **kwargs):
return self.send_cli(command, *args, **kwargs)
return dispatcher
def send_cli(self, command, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.args
if named_args:
p_args += ["-named"]
p_args += [command] + pos_args + named_args
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
return json.loads(cli_stdout, parse_float=decimal.Decimal)
| [] | [] | ["BITCOINCLI", "BITCOIND"] | [] | ["BITCOINCLI", "BITCOIND"] | python | 2 | 0 | |
pkg/initializer/initializer.go | package initializer
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/spf13/afero"
"github.com/giantswarm/e2e-harness/v3/pkg/harness"
)
const (
ProjectYamlContent = `version: 1
test:
env:
- "EXPECTED_KEY=expected_value"
- "TEST_USER=${USER}"
`
ClientGoContent = `// +build e2e
package e2e
import (
"github.com/giantswarm/microerror"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/giantswarm/e2e-harness/v3/pkg/harness"
)
func getK8sClient() (kubernetes.Interface, error) {
config, err := clientcmd.BuildConfigFromFlags("", harness.DefaultKubeConfig)
if err != nil {
return nil, microerror.Mask(err)
}
cs, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, microerror.Mask(err)
}
return cs, nil
}
`
ExampleTestGoContent = `// +build e2e
package e2e
import (
"os"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestZeroInitialPods(t *testing.T) {
cs, err := getK8sClient()
if err != nil {
t.Errorf("unexpected error %v", err)
}
pods, err := cs.CoreV1().Pods("default").List(metav1.ListOptions{})
if err != nil {
t.Errorf("unexpected error %v", err)
}
if len(pods.Items) != 0 {
t.Errorf("Unexpected number of pods, expected 0, got %d", len(pods.Items))
}
}
func TestEnvVars(t *testing.T) {
expected := "expected_value"
actual := os.Getenv("EXPECTED_KEY")
if expected != actual {
t.Errorf("unexpected value for EXPECTED_KEY, expected %q, got %q", expected, actual)
}
}
`
)
type fileDef struct {
name string
content string
}
type Initializer struct {
logger micrologger.Logger
fs afero.Fs
projectName string
}
func New(logger micrologger.Logger, fs afero.Fs, projectName string) *Initializer {
return &Initializer{
logger: logger,
fs: fs,
projectName: projectName,
}
}
func (i *Initializer) CreateLayout(ctx context.Context) error {
wd, err := os.Getwd()
if err != nil {
return microerror.Mask(err)
}
baseDir := filepath.Join(wd, harness.DefaultKubeConfig)
// return if base dir already exists.
if _, err := i.fs.Stat(baseDir); !os.IsNotExist(err) {
return fmt.Errorf("%s already exists", baseDir)
}
if err := i.fs.MkdirAll(baseDir, os.ModePerm); err != nil {
return microerror.Mask(err)
}
afs := &afero.Afero{Fs: i.fs}
files := []fileDef{
{
name: "project.yaml",
content: ProjectYamlContent,
},
{
name: "client.go",
content: ClientGoContent,
},
{
name: "example_test.go",
content: ExampleTestGoContent,
},
}
if err := i.writeFiles(files, baseDir, afs); err != nil {
return microerror.Mask(err)
}
return nil
}
func (i *Initializer) writeFiles(files []fileDef, baseDir string, afs *afero.Afero) error {
for _, f := range files {
path := filepath.Join(baseDir, f.name)
if err := afs.WriteFile(path, []byte(f.content), os.ModePerm); err != nil {
return microerror.Mask(err)
}
}
return nil
}
| ["\"EXPECTED_KEY\""] | [] | ["EXPECTED_KEY"] | [] | ["EXPECTED_KEY"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"database/sql"
"flag"
"os"
"strconv"
"time"
sq "github.com/Masterminds/squirrel"
"github.com/daskol/2chai/api"
"github.com/google/subcommands"
_ "github.com/lib/pq"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
var dsn string
func init() {
dsn = os.Getenv("DSN")
if dsn == "" {
dsn = "postgresql://[email protected]/2chai?sslmode=disable"
}
}
// listBoards implements the subcommands.Commander interface in order to print
// the list of available boards.
type listBoards struct{}
func (l *listBoards) Name() string { return "list-boards" }
func (l *listBoards) Synopsis() string { return "List avaliable boards." }
func (l *listBoards) Usage() string {
return "list-boards\n"
}
func (l *listBoards) SetFlags(_ *flag.FlagSet) {}
func (l *listBoards) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
if len(f.Args()) > 0 {
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
if lst, err := api.ListBoards(); err != nil {
log.Fatal(err)
} else {
log.Println(lst)
for i, board := range lst.Boards {
log.Printf("[%03d] %s\n", i+1, board)
}
}
return subcommands.ExitSuccess
}
// listThreads implements the subcommands.Commander interface so that the
// threads of a given board can be listed.
type listThreads struct{}
func (l *listThreads) Name() string { return "list-threads" }
func (l *listThreads) Synopsis() string { return "List threads of the given board." }
func (l *listThreads) Usage() string {
return "list-threads BOARD\n"
}
func (l *listThreads) SetFlags(_ *flag.FlagSet) {}
func (l *listThreads) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
switch {
case len(f.Args()) < 1:
log.Println("Too few arguments.")
return subcommands.ExitUsageError
case len(f.Args()) > 1:
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
board := f.Args()[0]
if lst, err := api.ListThreadCatalog(board); err != nil {
log.Fatal(err)
} else {
log.Println(lst)
for i, thread := range lst.Threads {
log.Printf("[%03d] %s\n", i+1, thread)
}
}
return subcommands.ExitSuccess
}
// listPosts implements the subcommands.Commander interface in order to print
// the list of posts belonging to a given thread.
type listPosts struct{}
func (l *listPosts) Name() string { return "list-posts" }
func (l *listPosts) Synopsis() string { return "list posts of the given thread" }
func (l *listPosts) Usage() string {
return "list-posts BOARD THREAD\n"
}
func (l *listPosts) SetFlags(_ *flag.FlagSet) {}
func (l *listPosts) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
switch {
case len(f.Args()) < 2:
log.Println("Too few arguments.")
return subcommands.ExitUsageError
case len(f.Args()) > 2:
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
board := f.Args()[0]
thread := f.Args()[1]
if posts, err := api.ListPosts(board, thread); err != nil {
log.Fatal(err)
} else {
for i, post := range posts {
log.Printf("[%03d] %s\n", i+1, post)
}
}
return subcommands.ExitSuccess
}
// syncAll implements the subcommands.Commander interface in order to
// synchronize the post database at a regular interval.
type syncAll struct{}
func (s *syncAll) Name() string { return "sync-all" }
func (s *syncAll) Synopsis() string { return "Synchronize all in background." }
func (s *syncAll) Usage() string {
return "sync-all\n"
}
func (s *syncAll) SetFlags(_ *flag.FlagSet) {}
func (s *syncAll) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
if len(f.Args()) > 0 {
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
log.Println("connect to database")
db, err := sql.Open("postgres", dsn)
if err != nil {
log.Fatal(err)
}
defer db.Close()
if err := db.Ping(); err != nil {
log.Fatal(err)
}
log.Println("enter in synchronization loop")
duration := 1200 * time.Second
timer := time.NewTimer(0)
defer timer.Stop()
for range timer.C {
timer.Reset(duration)
go func() {
if err := s.syncAll(db); err != nil {
log.Errorf("%s: skip iteration", err)
} else {
log.Info("done.")
}
}()
}
return subcommands.ExitSuccess
}
func (s *syncAll) syncAll(db *sql.DB) error {
log.Println("query list of known boards from database")
boardIDs, boardAbbrs, err := s.listBoards(db)
if err != nil {
return err
}
ch := make(chan int, len(boardIDs))
log.Println("synchronize all threads in all boards")
for i := range boardIDs {
ch <- i
}
worker := func() error {
for i := range ch {
boardID := boardIDs[i]
boardAbbr := boardAbbrs[i]
log.Infof("%03d request thread catalog of board %s",
i, boardAbbr)
if err := s.syncBoard(boardID, boardAbbr, db); err != nil {
return err
}
}
return nil
}
grp := errgroup.Group{}
grp.Go(worker)
grp.Go(worker)
grp.Go(worker)
close(ch)
return grp.Wait()
}
func (s *syncAll) listBoards(db *sql.DB) ([]int, []string, error) {
rows, err := sq.
Select("board_id", "abbr").
From("boards").
Where(sq.Eq{"watch": true}).
PlaceholderFormat(sq.Dollar).
RunWith(db).
Query()
if err != nil {
return nil, nil, err
}
defer rows.Close()
boardIDs := []int{}
boardAbbrs := []string{}
for rows.Next() {
boardID := 0
boardAbbr := ""
if err := rows.Scan(&boardID, &boardAbbr); err != nil {
return nil, nil, err
}
boardIDs = append(boardIDs, boardID)
boardAbbrs = append(boardAbbrs, boardAbbr)
}
return boardIDs, boardAbbrs, nil
}
func (s *syncAll) syncBoard(boardID int, boardAbbr string, db *sql.DB) error {
threads, err := api.ListThreadCatalog(boardAbbr)
if err != nil {
return err
}
if err := upsertThreads(boardID, threads, db); err != nil {
return err
}
for _, thread := range threads.Threads {
threadID := thread.Num
if err := s.syncThread(boardAbbr, boardID, threadID, db); err != nil {
return err
}
}
return nil
}
func (s *syncAll) syncThread(boardAbbr string, boardID, threadID int, db *sql.DB) error {
posts, err := api.ListPosts(boardAbbr, strconv.Itoa(threadID))
if err != nil {
return err
}
return upsertPosts(boardID, threadID, posts, db)
}
// syncBoards implements the subcommands.Commander interface in order to
// populate the database with the list of available boards.
type syncBoards struct{}
func (s *syncBoards) Name() string { return "sync-boards" }
func (s *syncBoards) Synopsis() string { return "Synchronize avaliable boards." }
func (s *syncBoards) Usage() string {
return "sync-boards\n"
}
func (s *syncBoards) SetFlags(_ *flag.FlagSet) {}
func (s *syncBoards) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
if len(f.Args()) > 0 {
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
if lst, err := api.ListBoards(); err != nil {
log.Fatal(err)
} else if err := s.upsertBoards(lst); err != nil {
log.Fatal(err)
}
return subcommands.ExitSuccess
}
func (s *syncBoards) upsertBoards(boards *api.Boards) error {
log.Println("connect to database")
db, err := sql.Open("postgres", dsn)
if err != nil {
return err
}
defer db.Close()
if err := db.Ping(); err != nil {
return err
}
log.Println("prepare insert or update statement")
stmt := sq.
Insert("boards").
Columns("abbr", "name", "description").
Suffix("" +
"ON CONFLICT (abbr) " +
"DO UPDATE " +
"SET name = $2," +
" description = $3," +
" updated_at = CLOCK_TIMESTAMP()").
PlaceholderFormat(sq.Dollar).
RunWith(db)
for _, board := range boards.Boards {
stmt = stmt.Values(board.ID, board.Name, board.Info)
}
log.Println("execute statement")
if _, err := stmt.Exec(); err != nil {
return err
}
log.Println("done.")
return nil
}
// syncPosts implements the subcommands.Commander interface in order to
// add new posts or update existing ones.
type syncPosts struct{}
func (s *syncPosts) Name() string { return "sync-posts" }
func (s *syncPosts) Synopsis() string { return "Synchronize posts of specified thread." }
func (s *syncPosts) Usage() string {
return "sync-posts BOARD THREAD\n"
}
func (s *syncPosts) SetFlags(_ *flag.FlagSet) {}
func (s *syncPosts) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
switch {
case len(f.Args()) < 2:
log.Println("Too few arguments.")
return subcommands.ExitUsageError
case len(f.Args()) > 2:
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
board := f.Args()[0]
thread := f.Args()[1]
if lst, err := api.ListPosts(board, thread); err != nil {
log.Fatal(err)
} else if err := s.upsertPosts(board, thread, lst); err != nil {
log.Fatal(err)
} else {
log.Info("done.")
}
return subcommands.ExitSuccess
}
func (s *syncPosts) upsertPosts(board, thread string, posts []*api.Post) error {
log.Println("connect to database")
db, err := sql.Open("postgres", dsn)
if err != nil {
return err
}
defer db.Close()
if err := db.Ping(); err != nil {
return err
}
log.Println("find identifier of board `" + board + "`")
boardID := 0
threadID, _ := strconv.Atoi(thread)
rowScanner := sq.
Select("board_id").
From("boards").
Where(sq.Eq{"abbr": board}).
PlaceholderFormat(sq.Dollar).
RunWith(db).
QueryRow()
if err := rowScanner.Scan(&boardID); err != nil {
return err
}
return upsertPosts(boardID, threadID, posts, db)
}
// syncThreads implements the subcommands.Commander interface in order to
// add new threads or update existing ones.
type syncThreads struct{}
func (s *syncThreads) Name() string { return "sync-threads" }
func (s *syncThreads) Synopsis() string { return "Synchronize threads of specified board." }
func (s *syncThreads) Usage() string {
return "sync-threads BOARD\n"
}
func (s *syncThreads) SetFlags(_ *flag.FlagSet) {}
func (s *syncThreads) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
switch {
case len(f.Args()) < 1:
log.Println("Too few arguments.")
return subcommands.ExitUsageError
case len(f.Args()) > 1:
log.Println("Too many arguments.")
return subcommands.ExitUsageError
}
board := f.Args()[0]
if lst, err := api.ListThreadCatalog(board); err != nil {
log.Fatal(err)
} else if err := s.upsertThreads(board, lst); err != nil {
log.Fatal(err)
}
return subcommands.ExitSuccess
}
func (s *syncThreads) upsertThreads(board string, threads *api.Threads) error {
log.Println("connect to database")
db, err := sql.Open("postgres", dsn)
if err != nil {
return err
}
defer db.Close()
if err := db.Ping(); err != nil {
return err
}
log.Println("find identifier of board `" + board + "`")
boardID := 0
rowScanner := sq.
Select("board_id").
From("boards").
Where(sq.Eq{"abbr": board}).
PlaceholderFormat(sq.Dollar).
RunWith(db).
QueryRow()
if err := rowScanner.Scan(&boardID); err != nil {
return err
}
if err := upsertThreads(boardID, threads, db); err != nil {
return err
}
log.Println("done.")
return nil
}
func upsertThreads(boardID int, threads *api.Threads, db *sql.DB) error {
stmt := sq.
Insert("threads").
Columns("thread_id", "board_id", "subject", "created_at").
Suffix("ON CONFLICT (board_id, thread_id) DO NOTHING").
PlaceholderFormat(sq.Dollar).
RunWith(db)
for _, thread := range threads.Threads {
createdAt := time.Unix(thread.Timestamp, 0)
stmt = stmt.Values(thread.Num, boardID, thread.Subject, createdAt)
}
if _, err := stmt.Exec(); err != nil {
return err
}
return nil
}
func upsertPosts(boardID, threadID int, posts []*api.Post, db *sql.DB) error {
stmt := sq.
Insert("posts").
Columns("post_id", "thread_id", "board_id",
"ordinal", "op",
"author", "email", "subject", "comment",
"created_at").
Suffix("ON CONFLICT (board_id, post_id) DO NOTHING").
PlaceholderFormat(sq.Dollar).
RunWith(db)
for _, post := range posts {
createdAt := time.Unix(post.Timestamp, 0)
stmt = stmt.Values(post.Num, threadID, boardID,
post.Number, post.Op,
post.Name[:128], post.Email[:128], post.Subject, post.Comment[:16384],
createdAt)
}
if _, err := stmt.Exec(); err != nil {
return err
}
createdAt := time.Unix(posts[0].LastHit, 0)
_, err := sq.Update("threads").
Set("updated_at", createdAt).
Where(sq.Eq{"board_id": boardID, "thread_id": threadID}).
PlaceholderFormat(sq.Dollar).
RunWith(db).
Exec()
if err != nil {
return err
}
return nil
}
func main() {
subcommands.Register(subcommands.HelpCommand(), "")
subcommands.Register(subcommands.FlagsCommand(), "")
subcommands.Register(subcommands.CommandsCommand(), "")
subcommands.Register(&listBoards{}, "")
subcommands.Register(&listPosts{}, "")
subcommands.Register(&listThreads{}, "")
subcommands.Register(&syncAll{}, "")
subcommands.Register(&syncBoards{}, "")
subcommands.Register(&syncPosts{}, "")
subcommands.Register(&syncThreads{}, "")
flag.Parse()
os.Exit(int(subcommands.Execute(context.Background())))
}
| ["\"DSN\""] | [] | ["DSN"] | [] | ["DSN"] | go | 1 | 0 | |
martian/core/jobmanager_remote.go | // Copyright (c) 2020 10X Genomics, Inc. All rights reserved.
package core
import (
"bytes"
"context"
"math"
"os"
"os/exec"
"path"
"runtime/trace"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/martian-lang/martian/martian/util"
)
type RemoteJobManager struct {
jobMode string
jobResourcesMappings map[string]string
config jobManagerConfig
memGBPerCore int
maxJobs int
jobFreqMillis int
jobSem *MaxJobsSemaphore
limiter *time.Ticker
debug bool
queueMutex sync.Mutex
}
func NewRemoteJobManager(jobMode string, memGBPerCore int, maxJobs int, jobFreqMillis int,
jobResources string, config *JobManagerJson, debug bool) *RemoteJobManager {
self := &RemoteJobManager{}
self.jobMode = jobMode
self.memGBPerCore = memGBPerCore
self.maxJobs = maxJobs
self.jobFreqMillis = jobFreqMillis
self.debug = debug
self.config = verifyJobManager(jobMode, config, memGBPerCore)
// Parse jobresources mappings
self.jobResourcesMappings = map[string]string{}
for _, mapping := range strings.Split(jobResources, ";") {
if len(mapping) > 0 {
parts := strings.Split(mapping, ":")
if len(parts) == 2 {
self.jobResourcesMappings[parts[0]] = parts[1]
util.LogInfo("jobmngr", "Mapping %s to %s", parts[0], parts[1])
} else {
util.LogInfo("jobmngr", "Could not parse mapping: %s", mapping)
}
}
}
if self.maxJobs > 0 {
self.jobSem = NewMaxJobsSemaphore(self.maxJobs)
}
if self.jobFreqMillis > 0 {
self.limiter = time.NewTicker(time.Millisecond * time.Duration(self.jobFreqMillis))
} else {
// dummy limiter to keep struct OK
self.limiter = time.NewTicker(time.Millisecond * 1)
}
return self
}
func (self *RemoteJobManager) refreshResources(bool) error {
if self.jobSem != nil {
self.jobSem.FindDone()
}
return nil
}
func (self *RemoteJobManager) GetMaxCores() int {
return 0
}
func (self *RemoteJobManager) GetMaxMemGB() int {
return 0
}
func (self *RemoteJobManager) GetSettings() *JobManagerSettings {
return self.config.jobSettings
}
func (self *RemoteJobManager) GetSystemReqs(resRequest *JobResources) JobResources {
res := *resRequest
// Sanity check the thread count.
if res.Threads == 0 {
res.Threads = float64(self.config.jobSettings.ThreadsPerJob)
} else if res.Threads < 0 {
res.Threads = -res.Threads
}
// Sanity check memory requirements.
if res.MemGB < 0 {
// Negative request is a sentinel value requesting as much as possible.
// For remote jobs, at least for now, just reserve the minimum usable.
res.MemGB = -res.MemGB
}
if res.MemGB == 0 {
res.MemGB = float64(self.config.jobSettings.MemGBPerJob)
}
if res.VMemGB < 1 {
res.VMemGB = res.MemGB + float64(self.config.jobSettings.ExtraVmemGB)
}
// Compute threads needed based on memory requirements.
if self.memGBPerCore > 0 {
if threadsForMemory := res.MemGB /
float64(self.memGBPerCore); threadsForMemory > res.Threads {
res.Threads = threadsForMemory
}
}
// If threading is disabled, use only 1 thread.
if !self.config.threadingEnabled {
res.Threads = 1
} else {
// Remote job managers generally only support integer thread granularity.
res.Threads = math.Ceil(res.Threads)
}
return res
}
func (self *RemoteJobManager) execJob(shellCmd string, argv []string,
envs map[string]string, metadata *Metadata, resRequest *JobResources,
fqname string, shellName string, localpreflight bool) {
ctx, task := trace.NewTask(context.Background(), "queueRemote")
// no limit, send the job
if self.maxJobs <= 0 {
defer task.End()
self.sendJob(shellCmd, argv, envs,
metadata, resRequest,
fqname, shellName, ctx)
return
}
// grab job when ready, block until job state changes to a finalized state
go func() {
defer task.End()
if self.debug {
util.LogInfo("jobmngr", "Waiting for job: %s", fqname)
}
// if we want to try to put a more precise cap on cluster execution load,
// might be preferable to request num threads here instead of a slot per job
if success := self.jobSem.Acquire(metadata); !success {
return
}
if self.debug {
util.LogInfo("jobmngr", "Job sent: %s", fqname)
}
self.sendJob(shellCmd, argv, envs,
metadata, resRequest,
fqname, shellName, ctx)
}()
}
func (self *RemoteJobManager) endJob(metadata *Metadata) {
if self.jobSem != nil {
self.jobSem.Release(metadata)
}
}
func (self *RemoteJobManager) jobScript(
shellCmd string, argv []string, envs map[string]string,
metadata *Metadata,
resRequest *JobResources,
fqname, shellName string) string {
res := self.GetSystemReqs(resRequest)
// figure out per-thread memory requirements for the template.
// ceil to make sure that we're not starving a job.
vmemGBPerThread := int(math.Ceil(res.VMemGB / res.Threads))
if self.memGBPerCore > vmemGBPerThread {
vmemGBPerThread = self.memGBPerCore
}
memGBPerThread := vmemGBPerThread
if self.config.alwaysVmem && res.VMemGB > res.MemGB {
res.MemGB = res.VMemGB
} else {
memGBPerThread = int(math.Ceil(res.MemGB / res.Threads))
if self.memGBPerCore > memGBPerThread {
memGBPerThread = self.memGBPerCore
}
}
mappedJobResourcesOpt := ""
// If a __special is specified for this stage, and the runtime was called
// with MRO_JOBRESOURCES defining a mapping from __special to a complex value
// expression, then populate the resources option into the template. Otherwise,
// leave it blank to revert to default behavior.
if len(res.Special) > 0 {
if resources, ok := self.jobResourcesMappings[res.Special]; ok {
mappedJobResourcesOpt = strings.Replace(
self.config.jobResourcesOpt,
"__RESOURCES__", resources, 1)
}
}
threads := int(math.Ceil(res.Threads))
argsStr := formatArgs(threadEnvs(self, threads, envs), shellCmd, argv)
const prefix = "__MRO_"
const suffix = "__"
params := [...][2]string{
{prefix + "JOB_NAME" + suffix,
fqname + "." + shellName},
{prefix + "THREADS" + suffix,
strconv.Itoa(threads)},
{prefix + "STDOUT" + suffix,
shellSafeQuote(metadata.MetadataFilePath("stdout"))},
{prefix + "STDERR" + suffix,
shellSafeQuote(metadata.MetadataFilePath("stderr"))},
{prefix + "JOB_WORKDIR" + suffix,
shellSafeQuote(metadata.curFilesPath)},
{prefix + "CMD" + suffix,
argsStr},
{prefix + "MEM_GB" + suffix,
strconv.Itoa(int(math.Ceil(res.MemGB)))},
{prefix + "MEM_MB" + suffix,
strconv.Itoa(int(math.Ceil(res.MemGB * 1024)))},
{prefix + "MEM_KB" + suffix,
strconv.Itoa(int(math.Ceil(res.MemGB * 1024 * 1024)))},
{prefix + "MEM_B" + suffix,
strconv.Itoa(int(math.Ceil(res.MemGB * 1024 * 1024 * 1024)))},
{prefix + "MEM_GB_PER_THREAD" + suffix,
strconv.Itoa(memGBPerThread)},
{prefix + "MEM_MB_PER_THREAD" + suffix,
strconv.Itoa(memGBPerThread * 1024)},
{prefix + "MEM_KB_PER_THREAD" + suffix,
strconv.Itoa(memGBPerThread * 1024 * 1024)},
{prefix + "MEM_B_PER_THREAD" + suffix,
strconv.Itoa(memGBPerThread * 1024 * 1024 * 1024)},
{prefix + "VMEM_GB" + suffix,
strconv.Itoa(int(math.Ceil(res.VMemGB)))},
{prefix + "VMEM_MB" + suffix,
strconv.Itoa(int(math.Ceil(res.VMemGB * 1024)))},
{prefix + "VMEM_KB" + suffix,
strconv.Itoa(int(math.Ceil(res.VMemGB * 1024 * 1024)))},
{prefix + "VMEM_B" + suffix,
strconv.Itoa(int(math.Ceil(res.VMemGB * 1024 * 1024 * 1024)))},
{prefix + "VMEM_GB_PER_THREAD" + suffix,
strconv.Itoa(vmemGBPerThread)},
{prefix + "VMEM_MB_PER_THREAD" + suffix,
strconv.Itoa(vmemGBPerThread * 1024)},
{prefix + "VMEM_KB_PER_THREAD" + suffix,
strconv.Itoa(vmemGBPerThread * 1024 * 1024)},
{prefix + "VMEM_B_PER_THREAD" + suffix,
strconv.Itoa(vmemGBPerThread * 1024 * 1024 * 1024)},
{prefix + "ACCOUNT" + suffix,
os.Getenv("MRO_ACCOUNT")},
{prefix + "RESOURCES" + suffix,
mappedJobResourcesOpt},
}
template := self.config.jobTemplate
// Replace template annotations with actual values
args := make([]string, 0, 2*len(params))
for _, vals := range params {
rkey, val := vals[0], vals[1]
if len(val) > 0 {
args = append(args, rkey, val)
} else if strings.Contains(template, rkey) {
// Remove lines containing parameter from template
for _, line := range strings.Split(template, "\n") {
if strings.Contains(line, rkey) {
args = append(args, line, "")
}
}
}
}
r := strings.NewReplacer(args...)
return r.Replace(template)
}
// Format a shell command line to set environment variables and run the command.
//
// Handles quoting things as required.
func formatArgs(envs map[string]string, shellCmd string, argv []string) string {
// Estimate the size of the buffer that will be required.
argsLen := 9 + len(shellCmd)
for _, arg := range argv {
argsLen += 9 + len(arg)
}
envStrs := make([]string, 0, len(envs))
for k, v := range envs {
s := make([]byte, 0, len(k)+5+len(v))
s = append(s, k...)
s = append(s, '=')
s = appendShellSafeQuote(s, v)
argsLen += len(s) + 5
envStrs = append(envStrs, string(s))
}
// Ensure consistent ordering.
sort.Strings(envStrs)
argsStr := make([]byte, 0, argsLen)
for _, s := range envStrs {
argsStr = append(argsStr, s...)
argsStr = append(argsStr, " \\\n "...)
}
argsStr = appendShellSafeQuote(argsStr, shellCmd)
for _, arg := range argv {
argsStr = append(argsStr, " \\\n "...)
argsStr = appendShellSafeQuote(argsStr, arg)
}
return string(argsStr)
}
func (self *RemoteJobManager) sendJob(shellCmd string, argv []string, envs map[string]string,
metadata *Metadata, resRequest *JobResources, fqname string, shellName string,
ctx context.Context) {
jobscript := self.jobScript(shellCmd, argv, envs, metadata,
resRequest, fqname, shellName)
if err := metadata.WriteRaw("jobscript", jobscript); err != nil {
util.LogError(err, "jobmngr", "Could not write job script.")
}
cmd := exec.CommandContext(ctx, self.config.jobCmd, self.config.jobCmdArgs...)
cmd.Dir = metadata.curFilesPath
cmd.Stdin = strings.NewReader(jobscript)
// Regardless of the limiter rate, only allow one pending submission to the queue
// at a time. Otherwise there's a risk that if the submit command takes longer
// than jobFreqMillis, commands will still pile up. It's also a more "natural"
// way to limit the submit rate if the submit server can't keep up.
self.queueMutex.Lock()
defer self.queueMutex.Unlock()
if self.jobFreqMillis > 0 {
<-(self.limiter.C)
if self.debug {
util.LogInfo("jobmngr", "Job rate-limit released: %s", fqname)
}
}
util.EnterCriticalSection()
defer util.ExitCriticalSection()
if err := metadata.remove("queued_locally"); err != nil {
util.LogError(err, "jobmngr", "Error removing queue sentinel file.")
}
if output, err := cmd.CombinedOutput(); err != nil {
metadata.WriteErrorString(
"jobcmd error (" + err.Error() + "):\n" + string(output))
} else {
trimmed := bytes.TrimSpace(output)
// jobids should not have spaces in them. This is the most general way to
// check that a string is actually a jobid.
if len(trimmed) > 0 && !bytes.ContainsAny(trimmed, " \t\n\r") {
if err := metadata.WriteRawBytes("jobid", bytes.TrimSpace(output)); err != nil {
util.LogError(err, "jobmngr", "Could not write job id file.")
}
metadata.cache("jobid", metadata.uniquifier)
}
}
}
func (self *RemoteJobManager) checkQueue(ids []string, ctx context.Context) ([]string, string) {
if self.config.queueQueryCmd == "" {
return ids, ""
}
jobPath := util.RelPath(path.Join("..", "jobmanagers"))
cmd := exec.CommandContext(ctx, path.Join(jobPath, self.config.queueQueryCmd))
cmd.Dir = jobPath
cmd.Stdin = strings.NewReader(strings.Join(ids, "\n"))
var stderr bytes.Buffer
cmd.Stderr = &stderr
output, err := cmd.Output()
if err != nil {
return ids, stderr.String()
}
return strings.Split(string(output), "\n"), stderr.String()
}
func (self *RemoteJobManager) hasQueueCheck() bool {
return self.config.queueQueryCmd != ""
}
func (self *RemoteJobManager) queueCheckGrace() time.Duration {
return self.config.queueQueryGrace
}
| [
"\"MRO_ACCOUNT\""
]
| []
| [
"MRO_ACCOUNT"
]
| [] | ["MRO_ACCOUNT"] | go | 1 | 0 | |
DonorGrid/settings.py | """
Django settings for DonorGrid project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import sys
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# Configure django encryption key
SECRET_KEY = os.environ.get('SECRET', os.getrandom(32))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('PY_ENV', 'prod') == 'dev'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'admin_interface',
'colorfield',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'corsheaders',
'django_extensions',
'Configuration.apps.ConfigurationConfig',
'Package.apps.PackageConfig',
'Donation.apps.DonationConfig',
'Donor.apps.DonorConfig',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsPostCsrfMiddleware',
]
ROOT_URLCONF = 'DonorGrid.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'Templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DonorGrid.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {'default': {}}
if DEBUG:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('DB_NAME', 'donorgrid'),
'HOST': os.environ.get('DB_HOST', 'db'),
'CONN_MAX_AGE': os.environ.get('DB_CONN_AGE', None),
'PASSWORD': os.environ.get('DB_PASSWORD', 'donorgrid'),
'PORT': os.environ.get('DB_PORT', 5432),
'USER': os.environ.get('DB_USER', 'donorgrid')
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR.joinpath('static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Setup base url of the website
BASE_URL = os.environ.get('BASE_URL', 'http://localhost:8000')
DONATION_REDIRECT_URL = os.environ.get('POST_DONATION_REDIRECT', BASE_URL)
# CORS configuration
CORS_ALLOW_ALL_ORIGINS = True
CORS_ALLOW_METHODS = ['GET', 'POST', 'OPTIONS']
CORS_ALLOW_CREDENTIALS = True
# Media files configuration
MEDIA_ROOT = BASE_DIR / 'uploads'
MEDIA_URL = '/uploads/'
| [] | [] | ["POST_DONATION_REDIRECT", "DB_PASSWORD", "DB_HOST", "BASE_URL", "DB_PORT", "SECRET", "DB_NAME", "DB_CONN_AGE", "DB_USER", "PY_ENV"] | [] | ["POST_DONATION_REDIRECT", "DB_PASSWORD", "DB_HOST", "BASE_URL", "DB_PORT", "SECRET", "DB_NAME", "DB_CONN_AGE", "DB_USER", "PY_ENV"] | python | 10 | 0 | |
cmd/roomserver-integration-tests/main.go | // Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"encoding/json"
"net/http"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/test"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/roomserver/inthttp"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/gomatrixserverlib"
)
var (
// Path to where kafka is installed.
kafkaDir = defaulting(os.Getenv("KAFKA_DIR"), "kafka")
// The URI the kafka zookeeper is listening on.
zookeeperURI = defaulting(os.Getenv("ZOOKEEPER_URI"), "localhost:2181")
// The URI the kafka server is listening on.
kafkaURI = defaulting(os.Getenv("KAFKA_URIS"), "localhost:9092")
// How long to wait for the roomserver to write the expected output messages.
// This needs to be high enough to account for the time it takes to create
// the postgres database tables which can take a while on travis.
timeoutString = defaulting(os.Getenv("TIMEOUT"), "60s")
// Timeout for http client
timeoutHTTPClient = defaulting(os.Getenv("TIMEOUT_HTTP"), "30s")
// The name of maintenance database to connect to in order to create the test database.
postgresDatabase = defaulting(os.Getenv("POSTGRES_DATABASE"), "postgres")
// The name of the test database to create.
testDatabaseName = defaulting(os.Getenv("DATABASE_NAME"), "roomserver_test")
// The postgres connection config for connecting to the test database.
testDatabase = defaulting(os.Getenv("DATABASE"), fmt.Sprintf("dbname=%s binary_parameters=yes", testDatabaseName))
)
var exe = test.KafkaExecutor{
ZookeeperURI: zookeeperURI,
KafkaDirectory: kafkaDir,
KafkaURI: kafkaURI,
// Send stdout and stderr to our stderr so that we see error messages from
// the kafka process.
OutputWriter: os.Stderr,
}
func defaulting(value, defaultValue string) string {
if value == "" {
value = defaultValue
}
return value
}
var (
timeout time.Duration
timeoutHTTP time.Duration
)
func init() {
var err error
timeout, err = time.ParseDuration(timeoutString)
if err != nil {
panic(err)
}
timeoutHTTP, err = time.ParseDuration(timeoutHTTPClient)
if err != nil {
panic(err)
}
}
func createDatabase(database string) error {
cmd := exec.Command("psql", postgresDatabase)
cmd.Stdin = strings.NewReader(
fmt.Sprintf("DROP DATABASE IF EXISTS %s; CREATE DATABASE %s;", database, database),
)
// Send stdout and stderr to our stderr so that we see error messages from
// the psql process
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
return cmd.Run()
}
// runAndReadFromTopic runs a command and waits for a number of messages to be
// written to a kafka topic. It returns if the command exits, the number of
// messages is reached or after a timeout. It kills the command before it returns.
// It returns a list of the messages read from the command on success or an error
// on failure.
func runAndReadFromTopic(runCmd *exec.Cmd, readyURL string, doInput func(), topic string, count int, checkQueryAPI func()) ([]string, error) {
type result struct {
// data holds all of stdout on success.
data []byte
// err is set on failure.
err error
}
done := make(chan result)
readCmd := exec.Command(
filepath.Join(kafkaDir, "bin", "kafka-console-consumer.sh"),
"--bootstrap-server", kafkaURI,
"--topic", topic,
"--from-beginning",
"--max-messages", fmt.Sprintf("%d", count),
)
// Send stderr to our stderr so the user can see any error messages.
readCmd.Stderr = os.Stderr
// Kill both processes before we exit.
defer func() { runCmd.Process.Kill() }() // nolint: errcheck
defer func() { readCmd.Process.Kill() }() // nolint: errcheck
// Run the command, read the messages and wait for a timeout in parallel.
go func() {
// Read all of stdout.
defer func() {
if err := recover(); err != nil {
if errv, ok := err.(error); ok {
done <- result{nil, errv}
} else {
panic(err)
}
}
}()
data, err := readCmd.Output()
checkQueryAPI()
done <- result{data, err}
}()
go func() {
err := runCmd.Run()
done <- result{nil, err}
}()
go func() {
time.Sleep(timeout)
done <- result{nil, fmt.Errorf("Timeout reading %d messages from topic %q", count, topic)}
}()
// Poll the HTTP listener of the process waiting for it to be ready to receive requests.
ready := make(chan struct{})
go func() {
delay := 10 * time.Millisecond
for {
time.Sleep(delay)
if delay < 100*time.Millisecond {
delay *= 2
}
resp, err := http.Get(readyURL)
if err != nil {
continue
}
if resp.StatusCode == 200 {
break
}
}
ready <- struct{}{}
}()
// Wait for the roomserver to be ready to receive input or for it to crash.
select {
case <-ready:
case r := <-done:
return nil, r.err
}
// Write the input now that the server is running.
doInput()
	// Wait for one of the tasks to finish.
r := <-done
if r.err != nil {
return nil, r.err
}
// The kafka console consumer writes a newline character after each message.
// So we split on newline characters
lines := strings.Split(string(r.data), "\n")
if len(lines) > 0 {
// Remove the blank line at the end of the data.
lines = lines[:len(lines)-1]
}
return lines, nil
}
func writeToRoomServer(input []string, roomserverURL string) error {
var request api.InputRoomEventsRequest
var response api.InputRoomEventsResponse
var err error
request.InputRoomEvents = make([]api.InputRoomEvent, len(input))
for i := range input {
if err = json.Unmarshal([]byte(input[i]), &request.InputRoomEvents[i]); err != nil {
return err
}
}
x, err := inthttp.NewRoomserverClient(roomserverURL, &http.Client{Timeout: timeoutHTTP}, nil)
if err != nil {
return err
}
x.InputRoomEvents(context.Background(), &request, &response)
return response.Err()
}
// testRoomserver is used to run integration tests against a single roomserver.
// It creates new kafka topics for the input and output of the roomserver.
// It writes the input messages to the input kafka topic, formatting each message
// as canonical JSON so that it fits on a single line.
// It then runs the roomserver and waits for a number of messages to be written
// to the output topic.
// Once those messages have been written it runs the checkQueries function passing
// an api.RoomserverQueryAPI client. The caller can use this function to check the
// behaviour of the query API.
func testRoomserver(input []string, wantOutput []string, checkQueries func(api.RoomserverInternalAPI)) {
dir, err := ioutil.TempDir("", "room-server-test")
if err != nil {
panic(err)
}
cfg, _, err := test.MakeConfig(dir, kafkaURI, testDatabase, "localhost", 10000)
if err != nil {
panic(err)
}
if err = test.WriteConfig(cfg, dir); err != nil {
panic(err)
}
outputTopic := cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent)
err = exe.DeleteTopic(outputTopic)
if err != nil {
panic(err)
}
if err = exe.CreateTopic(outputTopic); err != nil {
panic(err)
}
if err = createDatabase(testDatabaseName); err != nil {
panic(err)
}
cache, err := caching.NewInMemoryLRUCache(false)
if err != nil {
panic(err)
}
doInput := func() {
fmt.Printf("Roomserver is ready to receive input, sending %d events\n", len(input))
if err = writeToRoomServer(input, cfg.RoomServerURL()); err != nil {
panic(err)
}
}
cmd := exec.Command(filepath.Join(filepath.Dir(os.Args[0]), "dendrite-room-server"))
// Append the roomserver config to the existing environment.
// We append to the environment rather than replacing so that any additional
// postgres and golang environment variables such as PGHOST are passed to
// the roomserver process.
cmd.Stderr = os.Stderr
cmd.Args = []string{"dendrite-room-server", "--config", filepath.Join(dir, test.ConfigFile)}
gotOutput, err := runAndReadFromTopic(cmd, cfg.RoomServerURL()+"/metrics", doInput, outputTopic, len(wantOutput), func() {
queryAPI, _ := inthttp.NewRoomserverClient("http://"+string(cfg.RoomServer.InternalAPI.Connect), &http.Client{Timeout: timeoutHTTP}, cache)
checkQueries(queryAPI)
})
if err != nil {
panic(err)
}
if len(wantOutput) != len(gotOutput) {
panic(fmt.Errorf("Wanted %d lines of output got %d lines", len(wantOutput), len(gotOutput)))
}
for i := range wantOutput {
if !equalJSON(wantOutput[i], gotOutput[i]) {
panic(fmt.Errorf("Wanted %q at index %d got %q", wantOutput[i], i, gotOutput[i]))
}
}
}
func equalJSON(a, b string) bool {
canonicalA, err := gomatrixserverlib.CanonicalJSON([]byte(a))
if err != nil {
panic(err)
}
canonicalB, err := gomatrixserverlib.CanonicalJSON([]byte(b))
if err != nil {
panic(err)
}
return string(canonicalA) == string(canonicalB)
}
func main() {
fmt.Println("==TESTING==", os.Args[0])
input := []string{
`{
"auth_event_ids": [],
"kind": 1,
"event": {
"origin": "matrix.org",
"signatures": {
"matrix.org": {
"ed25519:auto": "3kXGwNtdj+zqEXlI8PWLiB76xtrQ7SxcvPuXAEVCTo+QPoBoUvLi1RkHs6O5mDz7UzIowK5bi1seAN4vOh0OBA"
}
},
"origin_server_ts": 1463671337837,
"sender": "@richvdh:matrix.org",
"event_id": "$1463671337126266wrSBX:matrix.org",
"prev_events": [],
"state_key": "",
"content": {"creator": "@richvdh:matrix.org"},
"depth": 1,
"prev_state": [],
"room_id": "!HCXfdvrfksxuYnIFiJ:matrix.org",
"auth_events": [],
"hashes": {"sha256": "Q05VLC8nztN2tguy+KnHxxhitI95wK9NelnsDaXRqeo"},
"type": "m.room.create"}
}`, `{
"auth_event_ids": ["$1463671337126266wrSBX:matrix.org"],
"kind": 2,
"state_event_ids": ["$1463671337126266wrSBX:matrix.org"],
"event": {
"origin": "matrix.org",
"signatures": {
"matrix.org": {
"ed25519:auto": "a2b3xXYVPPFeG1sHCU3hmZnAaKqZFgzGZozijRGblG5Y//ewRPAn1A2mCrI2UM5I+0zqr70cNpHgF8bmNFu4BA"
}
},
"origin_server_ts": 1463671339844,
"sender": "@richvdh:matrix.org",
"event_id": "$1463671339126270PnVwC:matrix.org",
"prev_events": [[
"$1463671337126266wrSBX:matrix.org", {"sha256": "h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"}
]],
"membership": "join",
"state_key": "@richvdh:matrix.org",
"content": {
"membership": "join",
"avatar_url": "mxc://matrix.org/ZafPzsxMJtLaSaJXloBEKiws",
"displayname": "richvdh"
},
"depth": 2,
"prev_state": [],
"room_id": "!HCXfdvrfksxuYnIFiJ:matrix.org",
"auth_events": [[
"$1463671337126266wrSBX:matrix.org", {"sha256": "h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"}
]],
"hashes": {"sha256": "t9t3sZV1Eu0P9Jyrs7pge6UTa1zuTbRdVxeUHnrQVH0"},
"type": "m.room.member"},
"has_state": true
}`,
}
want := []string{
`{"type":"new_room_event","new_room_event":{
"event":{
"auth_events":[[
"$1463671337126266wrSBX:matrix.org",{"sha256":"h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"}
]],
"content":{
"avatar_url":"mxc://matrix.org/ZafPzsxMJtLaSaJXloBEKiws",
"displayname":"richvdh",
"membership":"join"
},
"depth": 2,
"event_id": "$1463671339126270PnVwC:matrix.org",
"hashes": {"sha256":"t9t3sZV1Eu0P9Jyrs7pge6UTa1zuTbRdVxeUHnrQVH0"},
"membership": "join",
"origin": "matrix.org",
"origin_server_ts": 1463671339844,
"prev_events": [[
"$1463671337126266wrSBX:matrix.org",{"sha256":"h/VS07u8KlMwT3Ee8JhpkC7sa1WUs0Srgs+l3iBv6c0"}
]],
"prev_state":[],
"room_id":"!HCXfdvrfksxuYnIFiJ:matrix.org",
"sender":"@richvdh:matrix.org",
"signatures":{
"matrix.org":{
"ed25519:auto":"a2b3xXYVPPFeG1sHCU3hmZnAaKqZFgzGZozijRGblG5Y//ewRPAn1A2mCrI2UM5I+0zqr70cNpHgF8bmNFu4BA"
}
},
"state_key":"@richvdh:matrix.org",
"type":"m.room.member"
},
"state_before_removes_event_ids":["$1463671339126270PnVwC:matrix.org"],
"state_before_adds_event_ids":null,
"latest_event_ids":["$1463671339126270PnVwC:matrix.org"],
"adds_state_event_ids":["$1463671337126266wrSBX:matrix.org", "$1463671339126270PnVwC:matrix.org"],
"removes_state_event_ids":null,
"last_sent_event_id":"",
"send_as_server":"",
"transaction_id": null
}}`,
}
testRoomserver(input, want, func(q api.RoomserverInternalAPI) {
var response api.QueryLatestEventsAndStateResponse
if err := q.QueryLatestEventsAndState(
context.Background(),
&api.QueryLatestEventsAndStateRequest{
RoomID: "!HCXfdvrfksxuYnIFiJ:matrix.org",
StateToFetch: []gomatrixserverlib.StateKeyTuple{
{EventType: "m.room.member", StateKey: "@richvdh:matrix.org"},
},
},
&response,
); err != nil {
panic(err)
}
if !response.RoomExists {
panic(fmt.Errorf(`Wanted room "!HCXfdvrfksxuYnIFiJ:matrix.org" to exist`))
}
if len(response.LatestEvents) != 1 || response.LatestEvents[0].EventID != "$1463671339126270PnVwC:matrix.org" {
panic(fmt.Errorf(`Wanted "$1463671339126270PnVwC:matrix.org" to be the latest event got %#v`, response.LatestEvents))
}
if len(response.StateEvents) != 1 || response.StateEvents[0].EventID() != "$1463671339126270PnVwC:matrix.org" {
panic(fmt.Errorf(`Wanted "$1463671339126270PnVwC:matrix.org" to be the state event got %#v`, response.StateEvents))
}
})
fmt.Println("==PASSED==", os.Args[0])
}
| [
"\"KAFKA_DIR\"",
"\"ZOOKEEPER_URI\"",
"\"KAFKA_URIS\"",
"\"TIMEOUT\"",
"\"TIMEOUT_HTTP\"",
"\"POSTGRES_DATABASE\"",
"\"DATABASE_NAME\"",
"\"DATABASE\""
]
| []
| [
"KAFKA_URIS",
"DATABASE_NAME",
"DATABASE",
"KAFKA_DIR",
"POSTGRES_DATABASE",
"TIMEOUT_HTTP",
"TIMEOUT",
"ZOOKEEPER_URI"
]
| [] | ["KAFKA_URIS", "DATABASE_NAME", "DATABASE", "KAFKA_DIR", "POSTGRES_DATABASE", "TIMEOUT_HTTP", "TIMEOUT", "ZOOKEEPER_URI"] | go | 8 | 0 | |
website/website.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/espians/viraltracing/website/asset"
"github.com/espians/viraltracing/website/frontpage"
"github.com/espians/viraltracing/website/site"
"github.com/espians/viraltracing/website/web"
)
func setupHandlers() {
web.EnsureHost("viraltracing.app")
asset.RegisterHandlers()
frontpage.RegisterHandlers()
site.RegisterHandlers()
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
setupHandlers()
if web.IsDev {
log.Printf("Listening on http://localhost:%s\n", port)
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), web.Mux))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
train_lmo.py | import torch.utils as utils
import argparse
import os
import random
import time
import numpy as np
import torch
import sys
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from datasets.tless.dataset_triplet import PoseDataset as PoseDataset_lmo
from datasets.linemod.dataset_lmo import PoseDataset as PoseDataset_linemod
from lib.network_lmo import PatchNet, PoseRefineNet
from lib.loss_tless import Loss
from lib.loss_refiner import Loss_refine
from lib.utils import setup_logger
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='lmo')
parser.add_argument('--dataset_root', type=str, default='/home/dell/yifeis/pose/bop_datasets/linemod/lmo/',
help='dataset root dir')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--workers', type=int, default=64, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help='learning rate')
parser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--w', default=0.015, help='learning rate')
parser.add_argument('--w_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--decay_margin', default=0.01, help='margin to decay lr & w')
parser.add_argument('--refine_margin', default=0.001, help='margin to start the training of iterative refinement')
parser.add_argument('--noise_trans', default=0.03,
help='range of the random noise of translation added to the training data')
parser.add_argument('--iteration', type=int, default=2, help='number of refinement iterations')
parser.add_argument('--nepoch', type=int, default=500, help='max numbesr of epochs to train')
parser.add_argument('--resume_posenet', type=str, default='', help='resume PoseNet model')#pose_model_2_193909.25539978288.pth
parser.add_argument('--resume_refinenet', type=str, default='', help='resume PoseRefineNet model')
parser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')
opt = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
proj_dir = os.getcwd()+'/'
torch.set_num_threads(64)
def main():
opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.dataset == 'lmo':
opt.num_objects = 8
opt.num_points = 2000
opt.outf = proj_dir +'trained_models/lmo/'
opt.log_dir = proj_dir +'experiments/logs/lmo/'
opt.repeat_epoch = 2
else:
print('Unknown dataset')
return
# torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:23456', rank=0, world_size=1)
estimator = PatchNet(num_obj=opt.num_objects)
# estimator = torch.nn.DataParallel(estimator)
estimator = estimator.cuda()
# estimator = torch.nn.parallel.DistributedDataParallel(estimator,find_unused_parameters=True)
total_params = sum(p.numel() for p in estimator.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
p.numel() for p in estimator.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
# print(estimator)
refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
refiner.cuda()
# utils.print_network(estimator)
if opt.resume_posenet != '':
estimator.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))
if opt.resume_refinenet != '':
refiner.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
opt.refine_start = False # True
opt.decay_start = True
opt.lr *= opt.lr_rate
opt.w *= opt.w_rate
opt.batch_size = int(opt.batch_size / opt.iteration)
optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
else:
opt.refine_start = False
opt.decay_start = False
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr, weight_decay=0.01)
dataset = PoseDataset_linemod('train', opt.num_points, False, opt.dataset_root, opt.noise_trans,
opt.refine_start)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers,
pin_memory=True)
test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers,
pin_memory=True)
opt.sym_list = dataset.get_sym_list()
nosym_list = dataset.get_nosym_list()
rot_list = dataset.get_rot_list()
ref_list = dataset.get_ref_list()
opt.num_points_mesh = dataset.get_num_points_mesh()
print(
'>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(
len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))
criterion = Loss(opt.num_points_mesh, opt.sym_list,rot_list,ref_list,nosym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
best_test = np.Inf
st_time = time.time()
for epoch in range(opt.start_epoch, opt.nepoch):
logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
logger.info('Train time {0}'.format(
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))
train_count = 0
train_dis_avg = 0.0
train_patch_avg = 0.0
train_norm_avg = 0.0
if opt.refine_start:
estimator.eval()
refiner.train()
else:
estimator.train()
optimizer.zero_grad()
for rep in range(opt.repeat_epoch):
            for i, data in enumerate(dataloader, 0):
points, choose, img, target_rt,target_trans, idx, \
choose_patchs,target_pt,model_points,normals,model_info,model_axis,_,_ = data
points, choose, img, target_rt, target_trans,idx,\
target_pt, model_points,normals,model_axis = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target_rt).cuda(), \
Variable(target_trans).cuda(),\
Variable(idx).cuda(), \
Variable(target_pt).cuda(),\
Variable(model_points).cuda(),\
Variable(normals).cuda(),\
Variable(model_axis).cuda()
normal_ls = []
for patch_id in range(len(choose_patchs)):
normal_ls.append(normals[0][choose_patchs[patch_id][0]])
pred_r, pred_t, pred_choose = estimator(img, points, choose, choose_patchs, idx)
loss, dis, norm_loss, patch_loss, r_pred, t_pred, _ = criterion(pred_r, pred_t, pred_choose, target_rt,
target_trans, idx, points,opt.w,
target_pt,model_points,
model_info)
if opt.refine_start:
dis.backward()
else:
loss.backward()
torch.cuda.empty_cache()
train_dis_avg += dis.item()
train_patch_avg += patch_loss.item()
train_norm_avg += norm_loss.item()
train_count += 1
if train_count % opt.batch_size == 0:
logger.info(
'Train time {0} Epoch {1} Batch {2} Frame {3} idx:{7} Avg_dis:{4} Avg_norm:{5} Avg_patch:{6}'.format(
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch,
int(train_count / opt.batch_size), train_count, train_dis_avg / opt.batch_size,
train_norm_avg / opt.batch_size,
train_patch_avg / opt.batch_size,
idx))
optimizer.step()
optimizer.zero_grad()
train_dis_avg = 0
train_norm_avg = 0
train_patch_avg = 0
if train_count != 0 and train_count % 1000 == 0:
if opt.refine_start:
torch.save(refiner.state_dict(), '{0}/pose_refine_model_current.pth'.format(opt.outf))
else:
torch.save(estimator.state_dict(), '{0}/pose_model_current.pth'.format(opt.outf))
print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))
logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
logger.info('Test time {0}'.format(
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))
test_dis = 0.0
test_patch = 0.0
test_norm = 0.0
test_count = 0
estimator.eval()
refiner.eval()
for j, data in enumerate(testdataloader, 0):
points, choose, img, target_rt, target_trans, idx, \
choose_patchs, target_pt, model_points, normals, model_info, model_axis, _, _ = data
points, choose, img, target_rt, target_trans, idx, \
target_pt, model_points, normals, model_axis = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target_rt).cuda(), \
Variable(target_trans).cuda(), \
Variable(idx).cuda(), \
Variable(target_pt).cuda(), \
Variable(model_points).cuda(), \
Variable(normals).cuda(), \
Variable(model_axis).cuda()
normal_ls = []
for patch_id in range(len(choose_patchs)):
normal_ls.append(normals[0][choose_patchs[patch_id][0]])
pred_r, pred_t, pred_choose = estimator(img, points, choose, choose_patchs, idx)
loss, dis, norm_loss, patch_loss, r_pred, t_pred, _ = criterion(pred_r, pred_t, pred_choose, target_rt,
target_trans, idx, points, opt.w,
target_pt, model_points,
model_info)
test_dis += dis.item()
test_norm += norm_loss.item()
test_patch += patch_loss.item()
logger.info('Test time {0} Test Frame No.{1} idx:{5} dis:{2} norm_loss:{3} patch_loss:{4}'.format(
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_count, dis, norm_loss,
patch_loss,idx))
test_count += 1
test_dis = test_dis / test_count
test_norm = test_norm / test_count
test_patch = test_patch / test_count
logger.info('Test time {0} Epoch {1} TEST FINISH Avg dis: {2} avg norm: {3} avg tless: {4}'.format(
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, test_dis, test_norm, test_patch))
if test_dis <= best_test:
best_test = test_dis
if opt.refine_start:
torch.save(refiner.state_dict(), '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
else:
torch.save(estimator.state_dict(), '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')
if best_test < opt.decay_margin and not opt.decay_start:
opt.decay_start = True
opt.lr *= opt.lr_rate
opt.w *= opt.w_rate
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
if best_test < opt.refine_margin and not opt.refine_start:
opt.refine_start = True
opt.batch_size = int(opt.batch_size / opt.iteration)
optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
dataset = PoseDataset_linemod('train', opt.num_points, False, opt.dataset_root, opt.noise_trans,
opt.refine_start)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers,
pin_memory=True)
test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False,
num_workers=opt.workers,
pin_memory=True)
opt.sym_list = dataset.get_sym_list()
opt.num_points_mesh = dataset.get_num_points_mesh()
print(
'>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(
len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))
            criterion = Loss(opt.num_points_mesh, opt.sym_list, rot_list, ref_list, nosym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
def displayPoint(data,target,view,title):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams['axes.unicode_minus'] = False
while len(data[0]) > 20000:
print("too much point")
exit()
fig = plt.figure()
ax = Axes3D(fig)
ax.set_title(title)
ax.scatter3D(data[:,0], data[:,1], data[:,2], c='r', marker='.')
ax.scatter3D(target[:, 0], target[:, 1], target[:, 2], c='b', marker='.')
ax.scatter3D(view[:, 0], view[:, 1], view[:, 2], c='g', marker='.')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
plt.close()
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
support/db/dbtest/db.go | package dbtest
import (
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"strconv"
"strings"
"testing"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/AnneNamuli/go-stellar/support/db/sqlutils"
"github.com/AnneNamuli/go-stellar/support/errors"
"github.com/stretchr/testify/require"
)
// DB represents an ephemeral database that starts blank and can be used
// to run tests against.
type DB struct {
Dialect string
DSN string
dbName string
t *testing.T
closer func()
closed bool
}
// randomName returns a new pseudo-random name that is sufficient for naming a
// test database. In the event that reading from the source of randomness
// fails, a panic will occur.
func randomName() string {
raw := make([]byte, 6)
_, err := rand.Read(raw)
if err != nil {
err = errors.Wrap(err, "read from rand failed")
panic(err)
}
enc := hex.EncodeToString(raw)
return fmt.Sprintf("test_%s", enc)
}
// Close closes and deletes the database represented by `db`
func (db *DB) Close() {
if db.closed {
return
}
db.closer()
db.closed = true
}
// Load executes all of the statements in the provided sql script against the
// test database, panicking if any fail. The receiver is returned allowing for
// chain-style calling within your test functions.
func (db *DB) Load(sql string) *DB {
conn := db.Open()
defer conn.Close()
tx, err := conn.Begin()
require.NoError(db.t, err)
defer tx.Rollback()
for i, cmd := range sqlutils.AllStatements(sql) {
_, err = tx.Exec(cmd)
require.NoError(db.t, err, "failed execing statement: %d", i)
}
err = tx.Commit()
require.NoError(db.t, err)
return db
}
// Open opens a sqlx connection to the db.
func (db *DB) Open() *sqlx.DB {
conn, err := sqlx.Open(db.Dialect, db.DSN)
require.NoError(db.t, err)
return conn
}
func (db *DB) Version() (major int) {
conn := db.Open()
defer conn.Close()
versionFull := ""
err := conn.Get(&versionFull, "SHOW server_version")
require.NoError(db.t, err)
version := strings.Fields(versionFull)
parts := strings.Split(version[0], ".")
major, err = strconv.Atoi(parts[0])
require.NoError(db.t, err)
return major
}
func execStatement(t *testing.T, pguser, query string) {
db, err := sqlx.Open("postgres", fmt.Sprintf("postgres://%s@localhost/?sslmode=disable", pguser))
require.NoError(t, err)
_, err = db.Exec(query)
require.NoError(t, err)
require.NoError(t, db.Close())
}
// Postgres provisions a new, blank database with a random name on the localhost
// of the running process. It assumes that you have postgres running on the
// default port, have the command line postgres tools installed, and that the
// current user has access to the server. It panics in the event of a failure.
func Postgres(t *testing.T) *DB {
var result DB
result.dbName = randomName()
result.Dialect = "postgres"
result.t = t
t.Log("Test Database:", result.dbName)
pgUser := os.Getenv("PGUSER")
if len(pgUser) == 0 {
pgUser = "postgres"
}
// create the db
execStatement(t, pgUser, "CREATE DATABASE "+pq.QuoteIdentifier(result.dbName))
result.DSN = fmt.Sprintf("postgres://%s@localhost/%s?sslmode=disable&timezone=UTC", pgUser, result.dbName)
result.closer = func() {
execStatement(t, pgUser, "DROP DATABASE "+pq.QuoteIdentifier(result.dbName))
}
return &result
}
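// Example usage (a minimal sketch: assumes a test in a consuming package; the
// table name below is purely illustrative):
//
//	func TestWithDatabase(t *testing.T) {
//		db := dbtest.Postgres(t)
//		defer db.Close()
//
//		conn := db.Open()
//		defer conn.Close()
//
//		_, err := conn.Exec("CREATE TABLE example (id serial PRIMARY KEY)")
//		require.NoError(t, err)
//	}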
| [
"\"PGUSER\""
]
| []
| [
"PGUSER"
]
| [] | ["PGUSER"] | go | 1 | 0 | |
rules/fetch/git_checkout.py | import os
from abc import ABCMeta, abstractmethod, abstractproperty
from dataclasses import dataclass
from typing import ClassVar, cast
from pants.engine.fs import Digest
from pants.engine.process import BinaryPath, BinaryPathRequest, BinaryPathTest, BinaryPaths, ProcessResult, Process
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.unions import union
from pants.subsystem.subsystem import Subsystem
from pants.util.meta import classproperty  # classproperty helper; import path assumed
@dataclass(frozen=True)
class Git:
inner: BinaryPath
def get_path(self) -> str:
return self.inner.path
@rule
async def collect_git() -> Git:
path_value = os.environ['PATH']
env_paths = path_value.split(':')
git_paths = await Get(
BinaryPaths,
BinaryPathRequest(
binary_name='git',
search_paths=env_paths,
test=BinaryPathTest(args=['--version']),
)
)
git_bin = git_paths.first_path
if git_bin is None:
        raise OSError(f"Could not find 'git'. The user's PATH is: {path_value}.")
return Git(git_bin)
@dataclass(frozen=True)
class GitRepoRequest:
origin: str
@dataclass(frozen=True)
class GitRepoResult:
digest: Digest
@rule
async def git_clone_repo(git: Git, repo: GitRepoRequest) -> GitRepoResult:
shallow_clone = await Get(
ProcessResult,
Process(
argv=[
git.get_path(),
'clone',
'--depth=1',
repo.origin,
'known_clone',
],
output_directories=[
'known_clone',
],
)
)
return GitRepoResult(digest=shallow_clone.output_digest)
@dataclass(frozen=True)
class GitRevParseable:
spec: str
@dataclass(frozen=True)
class GitCheckoutRequest:
repo: GitRepoRequest
rev: GitRevParseable
@dataclass(frozen=True)
class GitCheckoutResult:
digest: Digest
@rule
async def checkout_rev(git: Git, checkout: GitCheckoutRequest) -> GitCheckoutResult:
repo = await Get(GitRepoResult, GitRepoRequest, checkout.repo)
checked_out = await Get(
ProcessResult,
Process(
argv=[
git.get_path(),
'checkout',
checkout.rev.spec,
            ],
input_digest=repo.digest,
output_directories=['.'],
)
)
    return GitCheckoutResult(digest=checked_out.output_digest)
@union
class GitSourceTool(Subsystem, metaclass=ABCMeta):
"""Same as the docstring for `ExternalTool`.
TODO: upstream an ExternalToolBase which decouples the version selection from any specific URL.
"""
default_origin: str
default_version: ClassVar[str]
@classproperty
def name(cls):
"""The name of the tool, for use in user-facing messages.
Derived from the classname, but subclasses can override, e.g., with a classproperty.
"""
return cls.__name__.lower()
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--origin",
type=str,
default=cls.default_origin,
advanced=True,
help=f"Use this upstream for {cls.name}.",
)
register(
"--version",
type=str,
default=cls.default_version,
advanced=True,
help=f"Use this version of {cls.name}.",
)
@property
def origin(self) -> str:
return cast(str, self.options.origin)
@property
def version(self) -> str:
return cast(str, self.options.version)
def into_checkout_request(self) -> GitCheckoutRequest:
return GitCheckoutRequest(
repo=GitRepoRequest(origin=self.origin),
rev=GitRevParseable(spec=self.version),
)
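# A minimal sketch of a concrete GitSourceTool subsystem; the scope, origin URL
# and version below are illustrative placeholders rather than real defaults.
#
#   class ExampleTool(GitSourceTool):
#       options_scope = 'example-tool'
#       default_origin = 'https://github.com/example/example-tool'
#       default_version = 'v1.2.3'
#
# A rule could then obtain its checkout with:
#
#   checkout = await Get(GitCheckoutResult, GitSourceRequest(inner=example_tool))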
@dataclass(unsafe_hash=True)
class GitSourceRequest:
# TODO: https://github.com/pantsbuild/pants/pull/8542 to avoid this wrapper struct!
inner: GitSourceTool
@rule
async def fetch_git(req: GitSourceRequest) -> GitCheckoutResult:
checkout_request = req.inner.into_checkout_request()
return await Get(GitCheckoutResult, GitCheckoutRequest, checkout_request)
def rules():
return [*collect_rules()]
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
autotest/autotest/wsgi.py | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Stephan Krause <[email protected]>
# Stephan Meissl <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2012 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
"""
WSGI config for EOxServer's autotest instance.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
path = "/var/eoxserver/autotest"
if path not in sys.path:
sys.path.append(path)
# NOTE: The Apache mod_wsgi, by default, shares the environment variables
#       between different WSGI apps, which leads to conflicts between
#       multiple EOxServer instances. Therefore we cannot rely on the
#       DJANGO_SETTINGS_MODULE environment variable and must always set it
#       to the proper value.
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "autotest.settings")
os.environ["DJANGO_SETTINGS_MODULE"] = "autotest.settings"
# Initialize the EOxServer component system.
import eoxserver.core
eoxserver.core.initialize()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
setup.py | from setuptools import setup, find_packages
setup(
name='panlex_API',
version='1.2.0',
author="Maxwell Joslyn; Caroline Glazer; Gary Krug; Alex DelPriore; Ben Yang",
author_email="[email protected]",
py_modules=["panlex"],
url="https://github.com/longnow/panlex_python_API",
description='Python wrapper for PanLex API',
install_requires=['ratelimit','requests'],
classifiers=["Development Status :: 5 - Production/Stable", "Programming Language :: Python",
"Programming Language :: Python :: 3", "Operating System :: OS Independent",
"License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries :: Python Modules"]
)
| []
| []
| []
| [] | [] | python | null | null | null |
src/populate.py | print "Populating Earth..."
import datetime
from django.db import models
from django.utils import timezone
import os
import sys
import django
import time
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'charts.settings')
django.setup()
from charts.models import ECoG
from django.utils import timezone
x = 0
while True:
time.sleep(2)
q = ECoG(Value=x, Time=timezone.now())
q.save()
#print q.id
x = x+1;
print ECoG.objects.latest('id').id
queryset = ECoG.objects.all()
print([p.Time.strftime('%m/%d/%Y') for p in queryset])
print([str(p.Value) for p in queryset])
print "Never Reach me"
| []
| []
| []
| [] | [] | python | 0 | 0 | |
miamm/wsgi.py | """
WSGI config for mealmanager project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "mealmanager.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
collector/collector.py | from datetime import datetime
from influxdb import InfluxDBClient
import psutil
import socket
import json
import time
# Setup database client
client = InfluxDBClient(host='influxdb', port=8086)
client.switch_database('collector_metrics')
# defines the hostname where the metrics are running
# to be used as a tag in the database for querying
host = socket.gethostname()
while True:
    # Interval between metric collection cycles
time.sleep(1)
# Metrics collected listed here
time_val = datetime.now().isoformat(' ')
# CPU Related
cpu_val = psutil.cpu_percent(interval=1)
# Memory Related
mem_val = psutil.virtual_memory().percent
swap_mem_val = psutil.swap_memory().percent
# Disk Related
disk = psutil.disk_usage('/')
disk_percent = psutil.disk_usage('/').percent
# JSON objects for DB ingestion
# CPU Percent Metrics
cpu_payload = [
{
"measurement": "cpu_percent",
"tags": {
"host": host
},
"time": time_val,
"fields": {
"value": cpu_val
}
}
]
# Memory Percent Metrics
mem_payload = [
{
"measurement": "mem_percent",
"tags": {
"host": host
},
"time": time_val,
"fields": {
"value": mem_val
}
}
]
# Swap Memory Percent Metrics
swap_mem_payload = [
{
"measurement": "swap_mem_percent",
"tags": {
"host": host
},
"time": time_val,
"fields": {
"value": swap_mem_val
}
}
]
# Disk Metrics
disk_space_percent_payload = [
{
"measurement": "disk_percent",
"tags": {
"host": host
},
"time": time_val,
"fields": {
"value": disk_percent
}
}
]
# Writing data to influxdb
client.write_points(cpu_payload)
client.write_points(mem_payload)
client.write_points(swap_mem_payload)
client.write_points(disk_space_percent_payload) | []
| []
| []
| [] | [] | python | null | null | null |
main.go | package main
import (
"fmt"
"io/fs"
"io/ioutil"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
)
// Token Variables used for command line parameters
var (
Token string
)
func init() {
Token = os.Getenv("TOKEN")
}
func main() {
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New("Bot " + Token)
if err != nil {
fmt.Println("error creating Discord session,", err)
return
}
// Register the messageCreate func as a callback for MessageCreate events.
dg.AddHandler(messageCreate)
// In this example, we only care about receiving message events.
dg.Identify.Intents = discordgo.IntentsGuildMessages
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
fmt.Println("error opening connection,", err)
return
}
status, err := ioutil.ReadFile("status.text")
if err != nil {
println("Could not read status.text file, " + err.Error())
}
err = dg.UpdateGameStatus(0, string(status))
if err != nil {
return
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Cleanly close down the Discord session.
err = dg.Close()
if err != nil {
println(err)
}
}
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself
// This isn't required in this specific example but it's a good practice.
if m.Author.ID == s.State.User.ID {
return
}
// If the message is "ping" reply with "Pong!"
if m.Content == "e!ping" {
var _, err = s.ChannelMessageSendReply(
m.ChannelID,
strconv.FormatInt(s.HeartbeatLatency().Milliseconds(), 10)+"ms",
m.Reference())
if err != nil {
println(err.Error())
}
}
if strings.HasPrefix(m.Content, "e!status") && m.Author.ID == "861733561463603240" {
args := strings.Split(m.Content, " ")
args = append(args[:0], args[1:]...)
err := s.UpdateGameStatus(0, strings.Join(args, " "))
if err != nil {
err := SendWithSelfDelete(s, m.ChannelID, "Failed to update status: "+err.Error())
if err != nil {
return
}
}
err = ioutil.WriteFile("status.text", []byte(strings.Join(args, " ")), fs.FileMode(0777))
if err != nil {
err := SendWithSelfDelete(s, m.ChannelID, "Failed to write to file: "+err.Error())
if err != nil {
return
}
}
_, err = s.ChannelMessageSendReply(m.ChannelID, "Changed status to "+strings.Join(args, " "), m.Reference())
if err != nil {
return
}
}
if strings.HasPrefix(m.Content, "e!clean") {
p, err := s.UserChannelPermissions(m.Author.ID, m.ChannelID)
if err != nil {
fmt.Println(err.Error())
}
		if !(p&discordgo.PermissionManageMessages == discordgo.PermissionManageMessages) {
			err := SendWithSelfDelete(s, m.ChannelID, "You don't have permission to run that command!")
			if err != nil {
				println(err.Error())
			}
			return
		}
args := strings.Split(m.Content, " ")
var count int
if len(args) >= 2 {
var err error
count, err = strconv.Atoi(strings.Split(m.Content, " ")[1])
if err != nil {
return
}
} else {
err := SendWithSelfDelete(s, m.ChannelID, "This command requires a count, e.g. `e!clean 10`")
if err != nil {
return
}
return
}
workingMessage, err := s.ChannelMessageSend(m.ChannelID, "Cleaning channel...")
if err != nil {
println(err.Error())
}
requestsNeeded := count / 100
for i := 1; i < requestsNeeded; i++ {
println(count / requestsNeeded)
messages, err := s.ChannelMessages(m.ChannelID, count, m.ID, "", "")
var messageIDs = make([]string, 0)
messageIDs = append(messageIDs, m.ID)
for _, message := range messages {
messageIDs = append(messageIDs, message.ID)
}
if err != nil {
println(err.Error())
}
err = s.ChannelMessagesBulkDelete(
m.ChannelID,
messageIDs)
if err != nil {
println(err.Error())
}
}
messages, err := s.ChannelMessages(m.ChannelID, count%100, m.ID, "", "")
var messageIDs = make([]string, 0)
messageIDs = append(messageIDs, m.ID)
for _, message := range messages {
messageIDs = append(messageIDs, message.ID)
}
if err != nil {
println(err.Error())
}
err = s.ChannelMessagesBulkDelete(
m.ChannelID,
messageIDs)
if err != nil {
println(err.Error())
}
err = s.ChannelMessageDelete(workingMessage.ChannelID, workingMessage.ID)
if err != nil {
return
}
err = SendWithSelfDelete(s, m.ChannelID, "Cleaned channel!")
if err != nil {
return
}
}
}
func SendWithSelfDelete(ds *discordgo.Session, channelId, message string) error {
m, err := ds.ChannelMessageSend(channelId, message)
if err != nil {
return err
}
go func(ch, id string, session *discordgo.Session) {
<-time.After(10 * time.Second)
_ = ds.ChannelMessageDelete(channelId, m.ID)
}(channelId, m.ID, ds)
return nil
}
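// A minimal sketch of running the bot locally (the token value is a placeholder):
//
//	TOKEN=your-bot-token go run .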
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | go | 1 | 0 | |
inbound/inbound.go | // +build !confonly
package inbound
import (
"context"
"log"
"os"
"v2ray.com/core/common"
"v2ray.com/core/proxy/vmess/inbound"
)
func init() {
	// Unregister the vmess inbound registration
common.UnRegisterConfig((*inbound.Config)(nil))
common.Must(common.RegisterConfig((*inbound.Config)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {
endpoint := os.Getenv("ThriftUserValidatorEndpoint")
if endpoint == "" {
log.Fatal("env ThriftUserValidatorEndpoint is required")
}
remoteUserValidator := NewRemoteUserValidator(RemoteUserValidatorOptions{
RemoteURL: endpoint,
})
options := inbound.HandlerOptions{
Clients: remoteUserValidator,
}
		// Return the vmess inbound with the modified user-validation module
return inbound.New(ctx, config.(*inbound.Config), options)
}))
}
| [
"\"ThriftUserValidatorEndpoint\""
]
| []
| [
"ThriftUserValidatorEndpoint"
]
| [] | ["ThriftUserValidatorEndpoint"] | go | 1 | 0 | |
assets/cloudformation/function_copy_website/lambda_function.py | import json
import boto3
from crhelper import CfnResource
import os
helper = CfnResource()
s3 = boto3.client('s3')
s3_resource = boto3.resource('s3')
sourceBucket = os.environ['s3sourceBucket']
sourcePrefix = os.environ['s3sourcePrefix']
destinationbucket = os.environ['s3destinationBucket']
def lambda_handler(event, context):
helper(event, context)
@helper.create
@helper.update
def copy_website(event, _):
bucket = s3_resource.Bucket(sourceBucket)
for object in bucket.objects.filter(Prefix=sourcePrefix):
file = object.key
try:
copy_source = {'Bucket': sourceBucket, 'Key': file}
s3_resource.meta.client.copy(
copy_source, destinationbucket, file.replace(sourcePrefix, ""))
        except Exception as e:
            print("An exception occurred copying: " + file + " - " + str(e))
@helper.delete
def delete_website(_, __):
for object in s3_resource.Bucket(destinationbucket).objects.all():
s3.delete_object(Bucket=destinationbucket, Key=object.key)
bucket = s3_resource.Bucket(destinationbucket)
bucket.object_versions.delete()
| []
| []
| [
"s3sourceBucket",
"s3sourcePrefix",
"s3destinationBucket"
]
| [] | ["s3sourceBucket", "s3sourcePrefix", "s3destinationBucket"] | python | 3 | 0 | |
runtime/bindings/python/tests/conftest.py | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
import tests
from pathlib import Path
def image_path():
path_to_repo = os.environ["DATA_PATH"]
path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
return path_to_img
def model_path(is_myriad=False):
path_to_repo = os.environ["MODELS_PATH"]
if not is_myriad:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
else:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
return (test_xml, test_bin)
def model_onnx_path():
path_to_repo = os.environ["MODELS_PATH"]
test_onnx = os.path.join(path_to_repo, "models", "test_model", "test_model.onnx")
return test_onnx
def plugins_path():
path_to_repo = os.environ["DATA_PATH"]
plugins_xml = os.path.join(path_to_repo, "ie_class", "plugins.xml")
plugins_win_xml = os.path.join(path_to_repo, "ie_class", "plugins_win.xml")
plugins_osx_xml = os.path.join(path_to_repo, "ie_class", "plugins_apple.xml")
return (plugins_xml, plugins_win_xml, plugins_osx_xml)
def _get_default_model_zoo_dir():
return Path(os.getenv("ONNX_HOME", Path.home() / ".onnx/model_zoo"))
def pytest_addoption(parser):
parser.addoption(
"--backend",
default="CPU",
choices=["CPU", "GPU", "HDDL", "MYRIAD", "HETERO", "TEMPLATE"],
help="Select target device",
)
parser.addoption(
"--model_zoo_dir",
default=_get_default_model_zoo_dir(),
type=str,
help="location of the model zoo",
)
parser.addoption(
"--model_zoo_xfail",
action="store_true",
help="treat model zoo known issues as xfails instead of failures",
)
def pytest_configure(config):
backend_name = config.getvalue("backend")
tests.BACKEND_NAME = backend_name
tests.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir"))
tests.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail")
# register additional markers
config.addinivalue_line("markers", "skip_on_cpu: Skip test on CPU")
config.addinivalue_line("markers", "skip_on_gpu: Skip test on GPU")
config.addinivalue_line("markers", "skip_on_hddl: Skip test on HDDL")
config.addinivalue_line("markers", "skip_on_myriad: Skip test on MYRIAD")
config.addinivalue_line("markers", "skip_on_hetero: Skip test on HETERO")
config.addinivalue_line("markers", "skip_on_template: Skip test on TEMPLATE")
config.addinivalue_line("markers", "onnx_coverage: Collect ONNX operator coverage")
config.addinivalue_line("markers", "dynamic_library: Runs tests only in dynamic libraries case")
def pytest_collection_modifyitems(config, items):
backend_name = config.getvalue("backend")
tests.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir"))
tests.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail")
keywords = {
"CPU": "skip_on_cpu",
"GPU": "skip_on_gpu",
"HDDL": "skip_on_hddl",
"MYRIAD": "skip_on_myriad",
"HETERO": "skip_on_hetero",
"TEMPLATE": "skip_on_template",
}
skip_markers = {
"CPU": pytest.mark.skip(reason="Skipping test on the CPU backend."),
"GPU": pytest.mark.skip(reason="Skipping test on the GPU backend."),
"HDDL": pytest.mark.skip(reason="Skipping test on the HDDL backend."),
"MYRIAD": pytest.mark.skip(reason="Skipping test on the MYRIAD backend."),
"HETERO": pytest.mark.skip(reason="Skipping test on the HETERO backend."),
"TEMPLATE": pytest.mark.skip(reason="Skipping test on the TEMPLATE backend."),
}
for item in items:
skip_this_backend = keywords[backend_name]
if skip_this_backend in item.keywords:
item.add_marker(skip_markers[backend_name])
@pytest.fixture(scope="session")
def device():
return os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU"
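# A minimal sketch of how these options and environment variables are typically
# combined when invoking the suite (paths and device are illustrative):
#
#   DATA_PATH=/path/to/testdata MODELS_PATH=/path/to/models TEST_DEVICE=CPU \
#       pytest . --backend=CPU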
| []
| []
| [
"DATA_PATH",
"ONNX_HOME",
"TEST_DEVICE",
"MODELS_PATH"
]
| [] | ["DATA_PATH", "ONNX_HOME", "TEST_DEVICE", "MODELS_PATH"] | python | 4 | 0 | |
kolibri/utils/conf.py | """
Kolibri configuration data
==========================
.. warning::
Do not load any django.conf.settings stuff here. This configuration data
precedes loading of settings, it is not part of the settings stack.
TODO: We need to figure out our conf API. Do we store in ini/json/yaml?
* How do we retrieve config data?
* When should configuration files be loaded and written?
This module should be easier to document, for instance by having VARIABLES
instead of a dict.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import os
from django.utils.functional import SimpleLazyObject
from .compat import module_exists
logger = logging.getLogger(__name__)
# use default OS encoding
with open(os.path.join(os.path.dirname(__file__), "KOLIBRI_CORE_JS_NAME")) as f:
KOLIBRI_CORE_JS_NAME = f.read().strip()
#: Absolute path of the main user data directory.
#: Will be created automatically if it doesn't exist.
KOLIBRI_HOME = os.path.abspath(os.path.expanduser(os.environ["KOLIBRI_HOME"]))
# Creating KOLIBRI_HOME atm. has to happen here as for instance utils.cli is not
# called through py.test. This file is the first basic entry point of
# Kolibri, although utils.cli may or may not precede it.
if not os.path.exists(KOLIBRI_HOME):
parent = os.path.dirname(KOLIBRI_HOME)
if not os.path.exists(parent):
raise RuntimeError(
"The parent of your KOLIBRI_HOME does not exist: {}".format(parent)
)
os.mkdir(KOLIBRI_HOME)
# Create a folder named logs inside KOLIBRI_HOME to store all the log files.
LOG_ROOT = os.path.join(KOLIBRI_HOME, "logs")
if not os.path.exists(LOG_ROOT):
os.mkdir(LOG_ROOT)
try:
# The default list for this is populated from build_tools/default_plugins.txt
# in the root of the Kolibri repository. The default list is identical to the list below,
# except that the style_guide plugin is not enabled in production builds.
# Caveat: this list may have been changed at build time to specify a different list of plugins.
from .build_config.default_plugins import plugins
DEFAULT_PLUGINS = plugins
except ImportError:
DEFAULT_PLUGINS = [
"kolibri.plugins.facility_management",
"kolibri.plugins.device_management",
"kolibri.plugins.learn",
"kolibri.plugins.document_pdf_render",
"kolibri.plugins.html5_app_renderer",
"kolibri.plugins.media_player",
"kolibri.plugins.setup_wizard",
"kolibri.plugins.coach",
"kolibri.plugins.user",
"kolibri_exercise_perseus_plugin",
"kolibri.plugins.style_guide",
"kolibri.plugins.document_epub_render",
"kolibri.plugins.default_theme",
]
conf_file = os.path.join(KOLIBRI_HOME, "kolibri_settings.json")
# These values are encoded on the config dict as sets
# so they need to be treated specially for serialization
# and deserialization to/from JSON
SET_KEYS = ["INSTALLED_APPS", "DISABLED_APPS"]
class ConfigDict(dict):
def __init__(self):
# If the settings file does not exist or does not contain
# valid JSON then create it
self.update(
{
#: Everything in this list is added to django.conf.settings.INSTALLED_APPS
# except disabled ones below
"INSTALLED_APPS": DEFAULT_PLUGINS,
#: Everything in this list is removed from the list above
"DISABLED_APPS": [],
}
)
if os.path.isfile(conf_file):
try:
# Open up the config file and load settings
# use default OS encoding
with open(conf_file, "r") as kolibri_conf_file:
self.update(json.load(kolibri_conf_file))
return
except json.JSONDecodeError:
logger.warn(
"Attempted to load kolibri_settings.json but encountered a file that could not be decoded as valid JSON."
)
logger.info("Initialize kolibri_settings.json..")
self.save()
@property
def ACTIVE_PLUGINS(self):
return list(self["INSTALLED_APPS"] - self["DISABLED_APPS"])
def update(self, new_values):
"""
Updates current configuration with ``new_values``. Does not save to file.
"""
values_copy = new_values.copy()
for key in SET_KEYS:
if key in values_copy:
values_copy[key] = set(values_copy[key])
super(ConfigDict, self).update(values_copy)
def save(self):
# use default OS encoding
config_copy = self.copy()
for key in SET_KEYS:
if key in config_copy:
config_copy[key] = list(config_copy[key])
with open(conf_file, "w") as kolibri_conf_file:
json.dump(config_copy, kolibri_conf_file, indent=2, sort_keys=True)
def autoremove_unavailable_plugins(self):
"""
Sanitize INSTALLED_APPS - something that should be done separately for all
build in plugins, but we should not auto-remove plugins that are actually
configured by the user or some other kind of hard dependency that should
make execution stop if not loadable.
"""
changed = False
# Iterate over a copy of the set so that it is not modified during the loop
for module_path in self["INSTALLED_APPS"].copy():
if not module_exists(module_path):
self["INSTALLED_APPS"].remove(module_path)
logger.error(
(
"Plugin {mod} not found and disabled. To re-enable it, run:\n"
" $ kolibri plugin {mod} enable"
).format(mod=module_path)
)
changed = True
if changed:
self.save()
def enable_default_plugins(self):
"""
Enable new plugins that have been added between versions
This will have the undesired side effect of reactivating
default plugins that have been explicitly disabled by a user,
in versions prior to the implementation of a plugin blacklist.
"""
changed = False
for module_path in DEFAULT_PLUGINS:
if module_path not in self["INSTALLED_APPS"]:
self["INSTALLED_APPS"].add(module_path)
# Can be migrated to upgrade only logic
if module_path not in self["DISABLED_APPS"]:
logger.warning(
(
"Default plugin {mod} not found in configuration. To re-disable it, run:\n"
" $ kolibri plugin {mod} disable"
).format(mod=module_path)
)
changed = True
if changed:
self.save()
def enable_plugin(self, module_path):
self["INSTALLED_APPS"].add(module_path)
try:
self["DISABLED_APPS"].remove(module_path)
except KeyError:
pass
def disable_plugin(self, module_path):
self["DISABLED_APPS"].add(module_path)
try:
self["INSTALLED_APPS"].remove(module_path)
except KeyError:
pass
#: Set defaults before updating the dict
config = ConfigDict()
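# Illustrative usage sketch (not part of the original module; the plugin names are
# just examples): the config object round-trips its set-valued keys through JSON
# as lists.
#
#   config.enable_plugin("kolibri.plugins.learn")    # added to INSTALLED_APPS
#   config.disable_plugin("kolibri.plugins.coach")   # added to DISABLED_APPS
#   config.save()                                     # sets written to JSON as lists
#   config.ACTIVE_PLUGINS                             # INSTALLED_APPS minus DISABLED_APPS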
def __initialize_options():
# read the config file options in here so they can be accessed from a standard location
from .options import read_options_file
return read_options_file(KOLIBRI_HOME)
OPTIONS = SimpleLazyObject(__initialize_options)
| [] | [] | ["KOLIBRI_HOME"] | [] | ["KOLIBRI_HOME"] | python | 1 | 0 | |
coverage/calc/delta.go | package calc
import (
"fmt"
"k8s.io/test-infra/coverage/githubUtil"
"k8s.io/test-infra/coverage/str"
"log"
"os"
"sort"
"strings"
)
type Incremental struct {
base Coverage
new Coverage
}
func (inc Incremental) delta() float32 {
baseRatio, _ := inc.base.Ratio()
newRatio, _ := inc.new.Ratio()
return newRatio - baseRatio
}
func (inc Incremental) Delta() string {
return str.PercentStr(inc.delta())
}
func (inc Incremental) deltaForCovbot() string {
if inc.base.nAllStmts == 0 {
return ""
}
return str.PercentageForCovbotDelta(inc.delta())
}
func (inc Incremental) oldCovForCovbot() string {
if inc.base.nAllStmts == 0 {
return "Do not exist"
}
return inc.base.Percentage()
}
func (inc Incremental) String() string {
return fmt.Sprintf("<%s> (%d / %d) %s ->(%d / %d) %s", inc.base.Name(),
inc.base.nCoveredStmts, inc.base.nAllStmts, inc.base.Percentage(),
inc.new.nCoveredStmts, inc.new.nAllStmts, inc.new.Percentage())
}
type GroupChanges struct {
Added []Coverage
Deleted []Coverage
Unchanged []Coverage
Changed []Incremental
BaseGroup *CoverageList
NewGroup *CoverageList
}
func sorted(m map[string]Coverage) (result []Coverage) {
var keys []string
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
result = append(result, m[k])
}
return
}
// NewGroupChanges compares the newList of coverage against the base list and
// returns the result
func NewGroupChanges(baseList *CoverageList, newList *CoverageList) *GroupChanges {
var added, unchanged []Coverage
var changed []Incremental
baseFilesMap := baseList.Map()
for _, newCov := range newList.group {
newCovName := newCov.Name()
baseCov, ok := baseFilesMap[newCovName]
isNewFile := false
if !ok {
added = append(added, newCov)
baseCov = *newCoverage(newCovName)
isNewFile = true
}
// after all the deletions, the leftover would be the elements that only exist in the base group,
// in other words, the files that were deleted in the new group
delete(baseFilesMap, newCovName)
incremental := Incremental{baseCov, newCov}
delta := incremental.delta()
if delta == 0 && !isNewFile {
unchanged = append(unchanged, newCov)
} else {
changed = append(changed, incremental)
}
}
return &GroupChanges{Added: added, Deleted: sorted(baseFilesMap), Unchanged: unchanged,
Changed: changed, BaseGroup: baseList, NewGroup: newList}
}
// processChangedFiles checks each entry in GroupChanges and sees whether it is
// included in the github commit. If yes, it includes that entry in the covbot report
func (changes *GroupChanges) processChangedFiles(
githubFilePaths *map[string]bool, rows *[]string, isEmpty,
isCoverageLow *bool) {
log.Printf("\nFinding joining set of changed files from profile[count=%d"+
"] & github\n", len(changes.Changed))
covThres := changes.NewGroup.covThresholdInt
for i, inc := range changes.Changed {
pathFromProfile := githubUtil.FilePathProfileToGithub(inc.base.Name())
fmt.Printf("checking if this file is in github change list: %s", pathFromProfile)
if (*githubFilePaths)[pathFromProfile] {
fmt.Printf("\tYes!\n")
*rows = append(*rows, inc.githubBotRow(i, pathFromProfile))
*isEmpty = false
if inc.new.IsCoverageLow(covThres) {
*isCoverageLow = true
}
} else {
fmt.Printf("\tNo\n")
}
}
fmt.Println("End of Finding joining set of changed files from profile & github")
return
}
func (inc Incremental) filePathWithHyperlink(filepath string) string {
return fmt.Sprintf("[%s](%s)", filepath, inc.new.lineCovLink)
}
// githubBotRow returns a string as the content of a row covbot posts
func (inc Incremental) githubBotRow(index int, filepath string) string {
return fmt.Sprintf("%s | %s | %s | %s",
inc.filePathWithHyperlink(filepath), inc.oldCovForCovbot(),
inc.new.Percentage(), inc.deltaForCovbot())
}
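
// Illustrative shape of a row (assumed values, not from the original source): for a
// file whose coverage moved from 50% to 60%, githubBotRow yields something like
//
//	[pkg/foo/bar.go](<line coverage link>) | 50.0% | 60.0% | <delta>
//
// with the exact percentage and delta formatting coming from the str helpers.
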
// ContentForGithubPost constructs the message covbot posts
func (changes *GroupChanges) ContentForGithubPost(files *map[string]bool) (
res string, isEmpty, isCoverageLow bool) {
jobName := os.Getenv("JOB_NAME")
rows := []string{
"The following is the coverage report on pkg/.",
fmt.Sprintf("Say `/test %s` to re-run this coverage report", jobName),
"",
"File | Old Coverage | New Coverage | Delta",
"---- |:------------:|:------------:|:-----:",
}
fmt.Printf("\n%d files changed, reported by github:\n", len(*files))
for githubFilePath := range *files {
fmt.Printf("%s\t", githubFilePath)
}
fmt.Printf("\n\n")
isEmpty = true
isCoverageLow = false
changes.processChangedFiles(files, &rows, &isEmpty, &isCoverageLow)
rows = append(rows, "")
return strings.Join(rows, "\n"), isEmpty, isCoverageLow
}
| ["\"JOB_NAME\""] | [] | ["JOB_NAME"] | [] | ["JOB_NAME"] | go | 1 | 0 | |
Algorithms/Implementation/FlatlandDistance.java | package Algorithms.Implementation;
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
/**
* HackerRank Algorithms Implementation 53
* https://www.hackerrank.com/challenges/flatland-space-stations/problem
* @author Hasol
*/
public class FlatlandDistance {
// Complete the flatlandSpaceStations function below.
static int flatlandSpaceStations(int n, int[] c) {
Arrays.sort(c);
int m = Integer.MIN_VALUE, d;
for (int i=1; i<c.length; i++)
m = (d = c[i] - c[i-1]) > m ? d : m;
m /= 2;
m = c[0] > m ? c[0] : m;
m = (d = n - c[c.length-1] -1) > m ? d : m;
return m;
}
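    // Illustrative example (not part of the original solution): for n = 5 and
    // stations c = {0, 4}, the largest interior gap contributes (4 - 0) / 2 = 2
    // and both edge segments contribute 0, so the maximum distance is 2.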
static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nm = scanner.nextLine().split(" ");
int n = Integer.parseInt(nm[0]);
int m = Integer.parseInt(nm[1]);
int[] c = new int[m];
String[] cItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i=0; i<m; i++)
c[i] = Integer.parseInt(cItems[i]);
int result = flatlandSpaceStations(n, c);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
| ["\"OUTPUT_PATH\""] | [] | ["OUTPUT_PATH"] | [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
node-runner-cli/setup/Base.py | import getpass
import os
import sys
from pathlib import Path
import requests
from env_vars import NETWORK_ID
from utils.utils import run_shell_command, Helpers
class Base:
@staticmethod
def install_dependecies():
run_shell_command('sudo apt update', shell=True)
run_shell_command('sudo apt install -y docker.io wget unzip docker-compose rng-tools', shell=True)
run_shell_command('sudo rngd -r /dev/random', shell=True)
@staticmethod
def add_user_docker_group():
run_shell_command('sudo groupadd docker', shell=True, fail_on_error=False)
is_in_docker_group = run_shell_command('groups | grep docker', shell=True, fail_on_error=False)
if is_in_docker_group.returncode != 0:
run_shell_command(f"sudo usermod -aG docker {os.environ.get('USER')}", shell=True)
print('Log out of the SSH session and log back in for the addition to the "docker" group to take effect')
@staticmethod
def fetch_universe_json(trustenode, extraction_path="."):
run_shell_command(
f'sudo wget --no-check-certificate -O {extraction_path}/universe.json https://{trustenode}/universe.json',
shell=True)
@staticmethod
def generatekey(keyfile_path, keyfile_name="node-keystore.ks", keygen_tag="1.0.0"):
print('-----------------------------')
if os.path.isfile(f'{keyfile_path}/{keyfile_name}'):
# TODO AutoApprove
print(f"Node key file already exist at location {keyfile_path}")
keystore_password = getpass.getpass(f"Enter the password of the existing keystore file '{keyfile_name}':")
else:
# TODO AutoApprove
ask_keystore_exists = input \
(f"Do you have keystore file named '{keyfile_name}' already from previous node Y/n?:")
if Helpers.check_Yes(ask_keystore_exists):
print(
f"Copy the keystore file '{keyfile_name}' to the location {keyfile_path} and then rerun the command")
sys.exit()
else:
print(f"""
Generating new keystore file. Don't forget to backup the key from location {keyfile_path}/{keyfile_name}
""")
keystore_password = getpass.getpass(f"Enter the password of the new file '{keyfile_name}':")
# TODO keygen image needs to be updated
run_shell_command(['docker', 'run', '--rm', '-v', keyfile_path + ':/keygen/key',
f'radixdlt/keygen:{keygen_tag}',
f'--keystore=/keygen/key/{keyfile_name}',
'--password=' + keystore_password], quite=True
)
run_shell_command(['sudo', 'chmod', '644', f'{keyfile_path}/{keyfile_name}'])
return keystore_password, f'{keyfile_path}/{keyfile_name}'
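    # Illustrative usage sketch (assumed path, not from the original source):
    #   password, keyfile = Base.generatekey("/home/radixdlt/node-config")
    # either reuses an existing node-keystore.ks in that directory or creates one via
    # the radixdlt/keygen docker image, returning the password typed by the operator.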
@staticmethod
def download_ansible_file(ansible_dir, file):
req = requests.Request('GET', f'{ansible_dir}/{file}')
prepared = req.prepare()
resp = Helpers.send_request(prepared, print_response=False)
if not resp.ok:
print(f"{resp.status_code} error retrieving ansible playbook.. Existing the command...")
sys.exit()
directory = file.rsplit('/', 1)[0]
Path(directory).mkdir(parents=True, exist_ok=True)
with open(file, 'wb') as f:
f.write(resp.content)
@staticmethod
def setup_node_optimisation_config(version):
check_ansible = run_shell_command(f"pip list | grep ansible", shell=True, fail_on_error=False)
import subprocess
user = subprocess.check_output('whoami', shell=True).strip()
if check_ansible.returncode != 0:
print(f"Ansible not found for the user {user.decode('utf-8')}. Installing ansible now")
check_pip = run_shell_command("pip -V ", shell=True, fail_on_error=False)
if check_pip.returncode != 0:
print(f"Pip is not installed. Installing pip now")
run_shell_command('sudo apt install python3-pip', shell=True)
run_shell_command(f"pip install --user ansible==2.10.0", shell=True)
print("""
----------------------------------------------------------------------------------------
Ansible installed successfully. You need to exit the shell and log back in.""")
sys.exit()
ansible_dir = f'https://raw.githubusercontent.com/radixdlt/node-runner/{version}/node-runner-cli'
print(f"Downloading artifacts from {ansible_dir}\n")
Base.download_ansible_file(ansible_dir, 'ansible/project/provision.yml')
ask_setup_limits = input \
("Do you want to setup ulimits [Y/n]?:")
setup_limits = "true" if Helpers.check_Yes(ask_setup_limits) else "false"
run_shell_command(
f"ansible-playbook ansible/project/provision.yml -e setup_limits={setup_limits}",
shell=True)
ask_setup_swap = input \
("Do you want to setup swap space [Y/n]?:")
if Helpers.check_Yes(ask_setup_swap):
setup_swap = "true"
ask_swap_size = input \
("Enter swap size in GB. Example - 1G or 3G or 8G ?:")
run_shell_command(
f"ansible-playbook ansible/project/provision.yml -e setup_swap={setup_swap} -e swap_size={ask_swap_size}",
shell=True)
else:
setup_swap = "false"
@staticmethod
def get_data_dir():
# TODO AutoApprove
data_dir_path = input("Enter the absolute path to data DB folder:")
run_shell_command(f'sudo mkdir -p {data_dir_path}', shell=True)
return data_dir_path
@staticmethod
def get_network_id():
# Network id
network_prompt = input("Enter the network you want to connect [S]Stokenet or [M]Mainnet or network_id:")
if network_prompt.lower() in ["s", "stokenet"]:
network_id = 2
elif network_prompt.lower() in ["m", "mainnet"]:
network_id = 1
elif network_prompt in ["1", "2", "3", "4", "5", "6", "7", "8"]:
network_id = network_prompt
else:
print("Input for network id is wrong. Exiting command")
sys.exit()
return network_id
@staticmethod
def path_to_genesis_json(network_id):
if network_id not in [1, 2]:
genesis_json_location = input("Enter absolute path to genesis json:")
else:
genesis_json_location = None
return genesis_json_location
| [] | [] | ["USER"] | [] | ["USER"] | python | 1 | 0 | |
sigstore/_cli.py | # Copyright 2022 The Sigstore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
from importlib import resources
from pathlib import Path
from textwrap import dedent
from typing import TextIO, cast
from sigstore import __version__
from sigstore._internal.fulcio.client import DEFAULT_FULCIO_URL, FulcioClient
from sigstore._internal.oidc.ambient import detect_credential
from sigstore._internal.oidc.issuer import Issuer
from sigstore._internal.oidc.oauth import (
DEFAULT_OAUTH_ISSUER,
STAGING_OAUTH_ISSUER,
get_identity_token,
)
from sigstore._internal.rekor.client import DEFAULT_REKOR_URL, RekorClient
from sigstore._sign import Signer
from sigstore._verify import (
CertificateVerificationFailure,
VerificationFailure,
Verifier,
)
logger = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("SIGSTORE_LOGLEVEL", "INFO").upper())
class _Embedded:
"""
A repr-wrapper for reading embedded resources, needed to help `argparse`
render defaults correctly.
"""
def __init__(self, name: str) -> None:
self._name = name
def read(self) -> bytes:
return resources.read_binary("sigstore._store", self._name)
def __repr__(self) -> str:
return f"{self._name} (embedded)"
def _parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog="sigstore",
description="a tool for signing and verifying Python package distributions",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-V", "--version", action="version", version=f"%(prog)s {__version__}"
)
subcommands = parser.add_subparsers(required=True, dest="subcommand")
# `sigstore sign`
sign = subcommands.add_parser(
"sign", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
oidc_options = sign.add_argument_group("OpenID Connect options")
oidc_options.add_argument(
"--identity-token",
metavar="TOKEN",
type=str,
help="the OIDC identity token to use",
)
oidc_options.add_argument(
"--oidc-client-id",
metavar="ID",
type=str,
default="sigstore",
help="The custom OpenID Connect client ID to use during OAuth2",
)
oidc_options.add_argument(
"--oidc-client-secret",
metavar="SECRET",
type=str,
help="The custom OpenID Connect client secret to use during OAuth2",
)
oidc_options.add_argument(
"--oidc-disable-ambient-providers",
action="store_true",
help="Disable ambient OpenID Connect credential detection (e.g. on GitHub Actions)",
)
output_options = sign.add_argument_group("Output options")
output_options.add_argument(
"--no-default-files",
action="store_true",
help="Don't emit the default output files ({input}.sig and {input}.crt)",
)
output_options.add_argument(
"--output-signature",
metavar="FILE",
type=Path,
help=(
"Write a single signature to the given file; conflicts with --output and "
"does not work with multiple input files"
),
)
output_options.add_argument(
"--output-certificate",
metavar="FILE",
type=Path,
help=(
"Write a single certificate to the given file; conflicts with --output and "
"does not work with multiple input files"
),
)
output_options.add_argument(
"--overwrite",
action="store_true",
help="Overwrite preexisting signature and certificate outputs, if present",
)
instance_options = sign.add_argument_group("Sigstore instance options")
instance_options.add_argument(
"--fulcio-url",
metavar="URL",
type=str,
default=DEFAULT_FULCIO_URL,
help="The Fulcio instance to use (conflicts with --staging)",
)
instance_options.add_argument(
"--rekor-url",
metavar="URL",
type=str,
default=DEFAULT_REKOR_URL,
help="The Rekor instance to use (conflicts with --staging)",
)
instance_options.add_argument(
"--ctfe",
dest="ctfe_pem",
metavar="FILE",
type=argparse.FileType("rb"),
help="A PEM-encoded public key for the CT log (conflicts with --staging)",
default=_Embedded("ctfe.pub"),
)
instance_options.add_argument(
"--rekor-root-pubkey",
metavar="FILE",
type=argparse.FileType("rb"),
help="A PEM-encoded root public key for Rekor itself (conflicts with --staging)",
default=_Embedded("rekor.pub"),
)
instance_options.add_argument(
"--oidc-issuer",
metavar="URL",
type=str,
default=DEFAULT_OAUTH_ISSUER,
help="The OpenID Connect issuer to use (conflicts with --staging)",
)
instance_options.add_argument(
"--staging",
action="store_true",
help="Use sigstore's staging instances, instead of the default production instances",
)
sign.add_argument(
"files",
metavar="FILE",
type=Path,
nargs="+",
help="The file to sign",
)
# `sigstore verify`
verify = subcommands.add_parser(
"verify", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
input_options = verify.add_argument_group("Verification inputs")
input_options.add_argument(
"--certificate",
"--cert",
metavar="FILE",
type=Path,
help="The PEM-encoded certificate to verify against; not used with multiple inputs",
)
input_options.add_argument(
"--signature",
metavar="FILE",
type=Path,
help="The signature to verify against; not used with multiple inputs",
)
verification_options = verify.add_argument_group("Extended verification options")
verification_options.add_argument(
"--cert-email",
metavar="EMAIL",
type=str,
help="The email address to check for in the certificate's Subject Alternative Name",
)
verification_options.add_argument(
"--cert-oidc-issuer",
metavar="URL",
type=str,
help="The OIDC issuer URL to check for in the certificate's OIDC issuer extension",
)
instance_options = verify.add_argument_group("Sigstore instance options")
instance_options.add_argument(
"--rekor-url",
metavar="URL",
type=str,
default=DEFAULT_REKOR_URL,
help="The Rekor instance to use (conflicts with --staging)",
)
instance_options.add_argument(
"--staging",
action="store_true",
help="Use sigstore's staging instances, instead of the default production instances",
)
verify.add_argument(
"files",
metavar="FILE",
type=Path,
nargs="+",
help="The file to verify",
)
return parser
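# Illustrative CLI usage (assumed file names, not part of the original module):
#
#   sigstore sign foo.txt             # writes foo.txt.sig and foo.txt.crt by default
#   sigstore verify foo.txt           # expects foo.txt.sig and foo.txt.crt next to the input
#   sigstore sign --staging foo.txt   # signs against the staging Fulcio/Rekor instances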
def main() -> None:
parser = _parser()
args = parser.parse_args()
logger.debug(f"parsed arguments {args}")
# Stuff the parser back into our namespace, so that we can use it for
# error handling later.
args._parser = parser
if args.subcommand == "sign":
_sign(args)
elif args.subcommand == "verify":
_verify(args)
else:
parser.error(f"Unknown subcommand: {args.subcommand}")
def _sign(args: argparse.Namespace) -> None:
# `--no-default-files` has no effect on `--output-{signature,certificate}`,
# but we forbid it because it indicates user confusion.
if args.no_default_files and (args.output_signature or args.output_certificate):
args._parser.error(
"--no-default-files may not be combined with "
"--output-signature or --output-certificate",
)
# Fail if `--output-signature` or `--output-certificate` is specified
# *and* we have more than one input.
if (args.output_signature or args.output_certificate) and len(args.files) > 1:
args._parser.error(
"Error: --output-signature and --output-certificate can't be used with "
"explicit outputs for multiple inputs; consider using --output",
)
# Build up the map of inputs -> outputs ahead of any signing operations,
# so that we can fail early if overwriting without `--overwrite`.
output_map = {}
for file in args.files:
if not file.is_file():
args._parser.error(f"Input must be a file: {file}")
sig, cert = args.output_signature, args.output_certificate
if not sig and not cert and not args.no_default_files:
sig = file.parent / f"{file.name}.sig"
cert = file.parent / f"{file.name}.crt"
if not args.overwrite:
extants = []
if sig and sig.exists():
extants.append(str(sig))
if cert and cert.exists():
extants.append(str(cert))
if extants:
args._parser.error(
"Refusing to overwrite outputs without --overwrite: "
f"{', '.join(extants)}"
)
output_map[file] = {"cert": cert, "sig": sig}
# Select the signer to use.
if args.staging:
logger.debug("sign: staging instances requested")
signer = Signer.staging()
args.oidc_issuer = STAGING_OAUTH_ISSUER
elif args.fulcio_url == DEFAULT_FULCIO_URL and args.rekor_url == DEFAULT_REKOR_URL:
signer = Signer.production()
else:
signer = Signer(
fulcio=FulcioClient(args.fulcio_url),
rekor=RekorClient(
args.rekor_url, args.rekor_root_pubkey.read(), args.ctfe_pem.read()
),
)
# The order of precedence is as follows:
#
# 1) Explicitly supplied identity token
# 2) Ambient credential detected in the environment, unless disabled
# 3) Interactive OAuth flow
if not args.identity_token and not args.oidc_disable_ambient_providers:
args.identity_token = detect_credential()
if not args.identity_token:
issuer = Issuer(args.oidc_issuer)
if args.oidc_client_secret is None:
args.oidc_client_secret = "" # nosec: B105
args.identity_token = get_identity_token(
args.oidc_client_id,
args.oidc_client_secret,
issuer,
)
if not args.identity_token:
args._parser.error("No identity token supplied or detected!")
for file, outputs in output_map.items():
logger.debug(f"signing for {file.name}")
result = signer.sign(
input_=file.read_bytes(),
identity_token=args.identity_token,
)
print("Using ephemeral certificate:")
print(result.cert_pem)
print(f"Transparency log entry created at index: {result.log_entry.log_index}")
sig_output: TextIO
if outputs["sig"]:
sig_output = outputs["sig"].open("w")
else:
sig_output = sys.stdout
print(result.b64_signature, file=sig_output)
if outputs["sig"]:
print(f"Signature written to file {outputs['sig']}")
if outputs["cert"] is not None:
cert_output = open(outputs["cert"], "w")
print(result.cert_pem, file=cert_output)
print(f"Certificate written to file {outputs['cert']}")
def _verify(args: argparse.Namespace) -> None:
# Fail if `--certificate` or `--signature` is specified and we have more than one input.
if (args.certificate or args.signature) and len(args.files) > 1:
args._parser.error(
"--certificate and --signature can only be used with a single input file"
)
# The converse of `sign`: we build up an expected input map and check
# that we have everything so that we can fail early.
input_map = {}
for file in args.files:
if not file.is_file():
args._parser.error(f"Input must be a file: {file}")
sig, cert = args.signature, args.certificate
if sig is None:
sig = file.parent / f"{file.name}.sig"
if cert is None:
cert = file.parent / f"{file.name}.crt"
missing = []
if not sig.is_file():
missing.append(str(sig))
if not cert.is_file():
missing.append(str(cert))
if missing:
args._parser.error(
f"Missing verification materials for {(file)}: {', '.join(missing)}"
)
input_map[file] = {"cert": cert, "sig": sig}
if args.staging:
logger.debug("verify: staging instances requested")
verifier = Verifier.staging()
elif args.rekor_url == DEFAULT_REKOR_URL:
verifier = Verifier.production()
else:
# TODO: We need CLI flags that allow the user to configure the Fulcio cert chain
# for verification.
args._parser.error(
"Custom Rekor and Fulcio configuration for verification isn't fully supported yet!",
)
for file, inputs in input_map.items():
# Load the signing certificate
logger.debug(f"Using certificate from: {inputs['cert']}")
certificate = inputs["cert"].read_bytes()
# Load the signature
logger.debug(f"Using signature from: {inputs['sig']}")
signature = inputs["sig"].read_bytes()
logger.debug(f"Verifying contents from: {file}")
result = verifier.verify(
input_=file.read_bytes(),
certificate=certificate,
signature=signature,
expected_cert_email=args.cert_email,
expected_cert_oidc_issuer=args.cert_oidc_issuer,
)
if result:
print(f"OK: {file}")
else:
result = cast(VerificationFailure, result)
print(f"FAIL: {file}")
print(f"Failure reason: {result.reason}", file=sys.stderr)
if isinstance(result, CertificateVerificationFailure):
# If certificate verification failed, it's either because of
# a chain issue or some outdated state in sigstore itself.
# These might already be resolved in a newer version, so
# we suggest that users try to upgrade and retry before
# anything else.
print(
dedent(
f"""
This may be a result of an outdated `sigstore` installation.
Consider upgrading with:
python -m pip install --upgrade sigstore
Additional context:
{result.exception}
"""
),
file=sys.stderr,
)
sys.exit(1)
| [] | [] | ["SIGSTORE_LOGLEVEL"] | [] | ["SIGSTORE_LOGLEVEL"] | python | 1 | 0 | |
test/e2e/externalservices/externalservices_universal.go | package externalservices
import (
"encoding/base64"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/kumahq/kuma/pkg/config/core"
. "github.com/kumahq/kuma/test/framework"
"github.com/kumahq/kuma/test/framework/deployments/externalservice"
)
func ExternalServicesOnUniversal() {
meshDefaulMtlsOn := `
type: Mesh
name: default
mtls:
enabledBackend: ca-1
backends:
- name: ca-1
type: builtin
networking:
outbound:
passthrough: false
`
externalService := `
type: ExternalService
mesh: default
name: external-service-%s
tags:
kuma.io/service: external-service-%s
kuma.io/protocol: http
networking:
address: %s
tls:
enabled: %s
caCert:
inline: "%s"
`
es1 := "1"
es2 := "2"
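	// Illustrative rendering (not part of the original test): with es1 the template above
	// expands to an ExternalService named "external-service-1" whose networking.address is
	// "kuma-3_externalservice-http-server:80" and whose tls.enabled is "false", matching
	// the fmt.Sprintf call in the first test case below.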
var cluster Cluster
var deployOptsFuncs []DeployOptionsFunc
BeforeEach(func() {
clusters, err := NewUniversalClusters(
[]string{Kuma3},
Silent)
Expect(err).ToNot(HaveOccurred())
// Global
cluster = clusters.GetCluster(Kuma3)
deployOptsFuncs = KumaUniversalDeployOpts
err = NewClusterSetup().
Install(Kuma(core.Standalone, deployOptsFuncs...)).
Setup(cluster)
Expect(err).ToNot(HaveOccurred())
err = cluster.VerifyKuma()
Expect(err).ToNot(HaveOccurred())
demoClientToken, err := cluster.GetKuma().GenerateDpToken("default", "demo-client")
Expect(err).ToNot(HaveOccurred())
err = NewClusterSetup().
Install(externalservice.Install(externalservice.HttpServer, externalservice.UniversalAppEchoServer)).
Install(externalservice.Install(externalservice.HttpsServer, externalservice.UniversalAppHttpsEchoServer)).
Install(DemoClientUniversal(AppModeDemoClient, "default", demoClientToken, WithTransparentProxy(true))).
Setup(cluster)
Expect(err).ToNot(HaveOccurred())
err = YamlUniversal(meshDefaulMtlsOn)(cluster)
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
if ShouldSkipCleanup() {
return
}
err := cluster.DeleteKuma(deployOptsFuncs...)
Expect(err).ToNot(HaveOccurred())
err = cluster.DismissCluster()
Expect(err).ToNot(HaveOccurred())
})
It("should route to external-service", func() {
err := YamlUniversal(fmt.Sprintf(externalService,
es1, es1,
"kuma-3_externalservice-http-server:80",
"false", ""))(cluster)
Expect(err).ToNot(HaveOccurred())
stdout, _, err := cluster.ExecWithRetries("", "", "demo-client",
"curl", "-v", "-m", "3", "--fail", "external-service-1.mesh")
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(ContainSubstring("HTTP/1.1 200 OK"))
Expect(stdout).ToNot(ContainSubstring("HTTPS"))
stdout, _, err = cluster.ExecWithRetries("", "", "demo-client",
"curl", "-v", "-m", "3", "--fail", "kuma-3_externalservice-http-server:80")
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(ContainSubstring("HTTP/1.1 200 OK"))
Expect(stdout).ToNot(ContainSubstring("HTTPS"))
})
It("should route to external-service over tls", func() {
// when set invalid certificate
otherCert := "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lRSGRQaHhPZlhnV3VOeG9GbFYvRXdxVEFOQmdrcWhraUc5dzBCQVFzRkFEQVAKTVEwd0N3WURWUVFERXdScmRXMWhNQjRYRFRJd01Ea3hOakV5TWpnME5Gb1hEVE13TURreE5ERXlNamcwTkZvdwpEekVOTUFzR0ExVUVBeE1FYTNWdFlUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCCkFPWkdiV2hTbFFTUnhGTnQ1cC8yV0NLRnlIWjNDdXdOZ3lMRVA3blM0Wlh5a3hzRmJZU3VWM2JJZ0Y3YlQvdXEKYTVRaXJlK0M2MGd1aEZicExjUGgyWjZVZmdJZDY5R2xRekhNVlljbUxHalZRdXlBdDRGTU1rVGZWRWw1STRPYQorMml0M0J2aWhWa0toVXo4eTVSUjVLYnFKZkdwNFoyMEZoNmZ0dG9DRmJlT0RtdkJzWUpGbVVRUytpZm95TVkvClAzUjAzU3U3ZzVpSXZuejd0bWt5ZG9OQzhuR1JEemRENUM4Zkp2clZJMVVYNkpSR3lMS3Q0NW9RWHQxbXhLMTAKNUthTjJ6TlYyV3RIc2FKcDlid3JQSCtKaVpHZVp5dnVoNVV3ckxkSENtcUs3c205VG9kR3p0VVpZMFZ6QWM0cQprWVZpWFk4Z1VqZk5tK2NRclBPMWtOOENBd0VBQWFPQmd6Q0JnREFPQmdOVkhROEJBZjhFQkFNQ0FxUXdIUVlEClZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQk1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWUQKVlIwT0JCWUVGR01EQlBQaUJGSjNtdjJvQTlDVHFqZW1GVFYyTUI4R0ExVWRFUVFZTUJhQ0NXeHZZMkZzYUc5egpkSUlKYkc5allXeG9iM04wTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDLzE3UXdlT3BHZGIxTUVCSjhYUEc3CjNzSy91dG9XTFgxdGpmOFN1MURnYTZDRFQvZVRXSFpyV1JmODFLT1ZZMDdkbGU1U1JJREsxUWhmYkdHdEZQK1QKdlprcm9vdXNJOVVTMmFDV2xrZUNaV0dUbnF2TG1Eb091anFhZ0RvS1JSdWs0bVFkdE5Ob254aUwvd1p0VEZLaQorMWlOalVWYkxXaURYZEJMeG9SSVZkTE96cWIvTU54d0VsVXlhVERBa29wUXlPV2FURGtZUHJHbWFXamNzZlBHCmFPS293MHplK3pIVkZxVEhiam5DcUVWM2huc1V5UlV3c0JsbjkrakRKWGd3Wk0vdE1sVkpyWkNoMFNsZTlZNVoKTU9CMGZDZjZzVE1OUlRHZzVMcGw2dUlZTS81SU5wbUhWTW8zbjdNQlNucEVEQVVTMmJmL3VvNWdJaXE2WENkcAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
err := YamlUniversal(fmt.Sprintf(externalService,
es2, es2,
"kuma-3_externalservice-https-server:443",
"true",
otherCert))(cluster)
Expect(err).ToNot(HaveOccurred())
// then accessing the secured external service fails
_, _, err = cluster.ExecWithRetries("", "", "demo-client",
"curl", "-v", "-m", "3", "--fail", "http://kuma-3_externalservice-https-server:443")
Expect(err).To(HaveOccurred())
// when set proper certificate
externalServiceCaCert := externalservice.From(cluster, externalservice.HttpsServer).GetCert()
Expect(externalServiceCaCert).ToNot(BeEmpty())
err = YamlUniversal(fmt.Sprintf(externalService,
es2, es2,
"kuma-3_externalservice-https-server:443",
"true",
base64.StdEncoding.EncodeToString([]byte(externalServiceCaCert))))(cluster)
Expect(err).ToNot(HaveOccurred())
// then accessing the secured external service succeeds
stdout, _, err := cluster.ExecWithRetries("", "", "demo-client",
"curl", "-v", "-m", "3", "--fail", "http://kuma-3_externalservice-https-server:443")
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(ContainSubstring("HTTP/1.1 200 OK"))
Expect(stdout).To(ContainSubstring("HTTPS"))
})
// certauth.idrix.fr is a site for testing mTLS authentication
// This site requires renegotiation because the server asks for the client certs as a second step
// We want to run this only on demand because we've had bad experiences tying E2E tests to an external service available on the internet
// It's hard to rebuild this as a local service in the cluster because many servers dropped support for renegotiation.
PIt("should check allow negotiation", func() {
// given
externalService := `
type: ExternalService
mesh: default
name: testmtls
tags:
kuma.io/service: testmtls
kuma.io/protocol: http
networking:
address: certauth.idrix.fr:443
tls:
enabled: true
allowRenegotiation: true
caCert:
inline: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTakNDQWpLZ0F3SUJBZ0lRUksrd2dOYWpKN3FKTURtR0x2aEFhekFOQmdrcWhraUc5dzBCQVFVRkFEQS8KTVNRd0lnWURWUVFLRXh0RWFXZHBkR0ZzSUZOcFoyNWhkSFZ5WlNCVWNuVnpkQ0JEYnk0eEZ6QVZCZ05WQkFNVApEa1JUVkNCU2IyOTBJRU5CSUZnek1CNFhEVEF3TURrek1ESXhNVEl4T1ZvWERUSXhNRGt6TURFME1ERXhOVm93ClB6RWtNQ0lHQTFVRUNoTWJSR2xuYVhSaGJDQlRhV2R1WVhSMWNtVWdWSEoxYzNRZ1EyOHVNUmN3RlFZRFZRUUQKRXc1RVUxUWdVbTl2ZENCRFFTQllNekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTit2NlpkUUNJTlh0TXhpWmZhUWd1ekgweXhyTU1wYjdObkRmY2RBd1JnVWkrRG9NM1pKS3VNL0lVbVRyRTRPCnJ6NUl5Mlh1L05NaEQyWFNLdGt5ajR6bDkzZXdFbnUxbGNDSm82bTY3WE11ZWd3R01vT2lmb29VTU0wUm9PRXEKT0xsNUNqSDlVTDJBWmQrM1VXT0R5T0tJWWVwTFlZSHNVbXU1b3VKTEdpaWZTS09lRE5vSmpqNFhMaDdkSU45Ygp4aXFLcXk2OWNLM0ZDeG9sa0hSeXhYdHFxelRXTUluLzVXZ1RlMVFMeU5hdTdGcWNraDQ5WkxPTXh0Ky95VUZ3CjdCWnkxU2JzT0ZVNVE5RDgvUmhjUVBHWDY5V2FtNDBkdXRvbHVjYlkzOEVWQWpxcjJtN3hQaTcxWEFpY1BOYUQKYWVRUW14a3F0aWxYNCtVOW01L3dBbDBDQXdFQUFhTkNNRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFPQmdOVgpIUThCQWY4RUJBTUNBUVl3SFFZRFZSME9CQllFRk1TbnNhUjdMSEg2MitGTGtIWC94QlZnaFlrUU1BMEdDU3FHClNJYjNEUUVCQlFVQUE0SUJBUUNqR2l5YkZ3QmNxUjd1S0dZM09yK0R4ejlMd3dtZ2xTQmQ0OWxaUk5JK0RUNjkKaWt1Z2RCL09FSUtjZEJvZGZwZ2EzY3NUUzdNZ1JPU1I2Y3o4ZmFYYmF1WCs1djNnVHQyM0FEcTFjRW12OHVYcgpBdkhSQW9zWnk1UTZYa2pFR0I1WUdWOGVBbHJ3RFBHeHJhbmNXWWFMYnVtUjlZYksrcmxtTTZwWlc4N2lweFp6ClI4c3J6Sm13TjBqUDQxWkw5YzhQREhJeWg4YndSTHRUY20xRDlTWkltbEpudDFpci9tZDJjWGpiRGFKV0ZCTTUKSkRHRm9xZ0NXakJINGQxUUI3d0NDWkFBNjJSallKc1d2SWpKRXViU2ZaR0wrVDB5aldXMDZYeXhWM2JxeGJZbwpPYjhWWlJ6STluZVdhZ3FOZHd2WWtRc0VqZ2ZiS2JZSzdwMkNOVFVRCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVaVENDQTAyZ0F3SUJBZ0lRUUFGMUJJTVVwTWdoaklTcERCYk4zekFOQmdrcWhraUc5dzBCQVFzRkFEQS8KTVNRd0lnWURWUVFLRXh0RWFXZHBkR0ZzSUZOcFoyNWhkSFZ5WlNCVWNuVnpkQ0JEYnk0eEZ6QVZCZ05WQkFNVApEa1JUVkNCU2IyOTBJRU5CSUZnek1CNFhEVEl3TVRBd056RTVNakUwTUZvWERUSXhNRGt5T1RFNU1qRTBNRm93Ck1qRUxNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQW9URFV4bGRDZHpJRVZ1WTNKNWNIUXhDekFKQmdOVkJBTVQKQWxJek1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdXdJVktNejJvSlRURHhMcwpqVldTdy9pQzhabW1la0tJcDEwbXFyVXJ1Y1ZNc2ErT2EvbDF5S1BYRDBlVUZGVTFWNHllcUtJNUdmV0NQRUtwClRtNzFPOE11MjQzQXNGenpXVGpuN2M5cDhGb0xHNzdBbENRbGgvbzNjYk1UNXh5czRadnYyK1E3UlZKRmxxbkIKVTg0MHlGTHV0YTd0ajk1Z2NPS2xWS3UyYlE2WHBVQTBheXZUdkdiclpqUjgrbXVMajFjcG1mZ3dGMTI2Y20vNwpnY1d0MG9aWVBSZkg1d203OFN2M2h0ekIybkZkMUVianpLMGx3WWk4WUdkMVpyUHhHUGVpWE9aVC96cUl0a2VsCi94TVk2cGdKZHorZFUvblBBZVgxcG5BWEZLOWpwUCtaczVPZDNGT25CdjVJaFIyaGFhNGxkYnNUekZJRDllMVIKb1l2YkZRSURBUUFCbzRJQmFEQ0NBV1F3RWdZRFZSMFRBUUgvQkFnd0JnRUIvd0lCQURBT0JnTlZIUThCQWY4RQpCQU1DQVlZd1N3WUlLd1lCQlFVSEFRRUVQekE5TURzR0NDc0dBUVVGQnpBQ2hpOW9kSFJ3T2k4dllYQndjeTVwClpHVnVkSEoxYzNRdVkyOXRMM0p2YjNSekwyUnpkSEp2YjNSallYZ3pMbkEzWXpBZkJnTlZIU01FR0RBV2dCVEUKcDdHa2V5eHgrdHZoUzVCMS84UVZZSVdKRURCVUJnTlZIU0FFVFRCTE1BZ0dCbWVCREFFQ0FUQS9CZ3NyQmdFRQpBWUxmRXdFQkFUQXdNQzRHQ0NzR0FRVUZCd0lCRmlKb2RIUndPaTh2WTNCekxuSnZiM1F0ZURFdWJHVjBjMlZ1ClkzSjVjSFF1YjNKbk1Ed0dBMVVkSHdRMU1ETXdNYUF2b0MyR0syaDBkSEE2THk5amNtd3VhV1JsYm5SeWRYTjAKTG1OdmJTOUVVMVJTVDA5VVEwRllNME5TVEM1amNtd3dIUVlEVlIwT0JCWUVGQlF1c3hlM1dGYkxybEFKUU9ZZgpyNTJMRk1MR01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFOQmdrcWhraUc5dzBCCkFRc0ZBQU9DQVFFQTJVemd5ZldFaURjeDI3c1Q0clA4aTJ0aUVteFl0MGwrUEFLM3FCOG9ZZXZPNEM1ejcwa0gKZWpXRUh4MnRhUERZL2xhQkwyMS9XS1p1TlRZUUhIUEQ1YjF0WGdIWGJuTDdLcUM0MDFkazVWdkNhZFRRc3ZkOApTOE1Yam9oeWM5ejkvRzI5NDhrTGptRTZGbGg5ZERZclZZQTl4Mk8raEVQR09hRU9hMWVlUHluQmdQYXl2VWZMCnFqQnN0ekxoV1ZRTEdBa1hYbU5zKzVablBCeHpESk9MeGhGMkpJYmVRQWNINUgwdFpyVWxvNVpZeU9xQTdzOXAKTzV
iODVvM0FNL09KK0NrdEZCUXRmdkJoY0pWZDl3dmx3UHNrK3V5T3kySEk3bU54S0tnc0JUdDM3NXRlQTJUdwpVZEhraFZOY3NBS1gxSDdHTk5MT0VBRGtzZDg2d3VvWHZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
clientCert: # we can pass any client tls pair to this server so here are certs generated by kumactl generate tls-certificate --type=client --key-file=client.key --cert-file=client.pem
inline: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJRENDQWdpZ0F3SUJBZ0lSQU4xeHJ2aGhyMExuSlN0czRrdDAyNkF3RFFZSktvWklodmNOQVFFTEJRQXcKRHpFTk1Bc0dBMVVFQXhNRWEzVnRZVEFlRncweU1UQTJNRGt4TURVek16QmFGdzB6TVRBMk1EY3hNRFV6TXpCYQpNQTh4RFRBTEJnTlZCQU1UQkd0MWJXRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCCkFRQ3hFMnV0TmRXZERpdXFab2M0bmpsdWRiWkFwdktyekRpRHQvTWhnK1piKzU2djlSMW50ajQ3SjRuQ2RBU20KWFRDenBHclVVSzNuNmkycXE5THl6SVZFWWoySEtsclFJTlVvN3QyUkRvbThwMHRtNFdSWWd6NnYwMlM4c2M5TwpFSjJUSE5RVWFyUnJWQTZxY0lic2RPUk5aTGREVnRXWndkY25WTHNQaUtDakluUGczem5vd21jWjhXbHJqKzNHClg3SVYwR0FWRDNjNmxBVlc3QXFXZVRxdHRMVVNOVitlU2JubHhGRjdveDlkZ1FydjVNdUY2T3BUUnRod1N3VHoKOG5jeHBVanhvTTBHYzhRUDdvSUQ3V3FPNlFUU2dFUUJaMFpJMFY2OC9zZUV3cVQxZ2F0YnorT2hPOVduaFRKLwpkMnBPWEZNNHkycWJ2bzJ4OW52MjVsTlZBZ01CQUFHamR6QjFNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQWRCZ05WCkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVU2N3pEaG1aUzB3cTE0b2RqN0JoS29GQ0Z2L1l3RkFZRFZSMFJCQTB3QzRJSmJHOWpZV3hvYjNOMApNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUExZDV0T1B4VUIwWmh5KzdRVG5YU25SdFl1L2VhaWw3WnduK1NTCndrNDlUSlg5L0RJeFkzVFd1aTYrZE5rcVFnQ1lNbm4vN2pLOTF2Y1dLTVNMSXgxUVNlT3BqYkR5cHJYQlduVHUKWDNaeENlUkQraVFEL0pPQ3ZXZ1ljT0daSnU2MmVvVmh6bzdzZU8zVnVpRmlSOVNsRTU1TE9ETC9aaFBzRjVxWQp3NzFBZm1ZQXNXQ1ZlT3A1cjBpK3pYU0pyaDh6V2xSQllrTDhPZlppMUtDT1liYlhxaHRaZGJkeTBDQStreVVGCkN4bm00dFBwNkE1UEpVNGNhYmppWUVQRGRqOS9BMnY5SlE2dDJhVHVKaE42WUo4enVNc2NaeVJUaFlnd0lBZGsKckRLWEF4NlpndzV2ejFXMnVDTGpzQVJPUXpoVU5TR3FPajVjUVZDNklDaVRNQzZECi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
clientKey:
inline: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBc1JOcnJUWFZuUTRycW1hSE9KNDViblcyUUtieXE4dzRnN2Z6SVlQbVcvdWVyL1VkClo3WStPeWVKd25RRXBsMHdzNlJxMUZDdDUrb3RxcXZTOHN5RlJHSTloeXBhMENEVktPN2RrUTZKdktkTFp1RmsKV0lNK3I5Tmt2TEhQVGhDZGt4elVGR3EwYTFRT3FuQ0c3SFRrVFdTM1ExYlZtY0hYSjFTN0Q0aWdveUp6NE44NQo2TUpuR2ZGcGE0L3R4bCt5RmRCZ0ZROTNPcFFGVnV3S2xuazZyYlMxRWpWZm5rbTU1Y1JSZTZNZlhZRUs3K1RMCmhlanFVMGJZY0VzRTgvSjNNYVZJOGFETkJuUEVEKzZDQSsxcWp1a0Uwb0JFQVdkR1NORmV2UDdIaE1LazlZR3IKVzgvam9UdlZwNFV5ZjNkcVRseFRPTXRxbTc2TnNmWjc5dVpUVlFJREFRQUJBb0lCQURqaGdDazNyZEt4aHAxSwpLZzJwNWREeHh3V2xtelpNZDZyNElBV1lGUnRmREc2QlVsektVZHMyckMzbWpzZlNENTdsSmR2bHZyZE1wamE0CjB4NWpURHZYUXVSMFdvK1l2R0JWdXA2cUNOeXM4Syt2bjBnL2dKZUNWRTI0NEZxM1E2YktEK1l2RUoyWmRzeVIKTVFZcjFscDJDOWg1d0V1UDFNa3hrcFUrMGpzVWdVWFpBeStVNWQ2RS9CU0s0UTZSQkZBMnY4VEViMGxWdGpVWgpaajRiRUxNL2Z1MXpibEFwSWc3Q3A0d2lObktXRjM3N0IyUEl6eGhIMWNmL1VmSVFTL0h3bDRCelV0c1hiRVlZCnU4UXQ3c2NFdElqeFkwSVI2NGVUNGZDYklNVzNEV2cydWR3WFFWSnJpdEh5UGRUaFRwYm04bFJsMW1sSHJMa3YKdXBUd1k0RUNnWUVBenhTYUVPTFpIRVFsUStTelBVM0l4S1pTdXNNQStFZkVncEtuVUZKMEVyZ3oxOHEyYzROWApCRnVRNU5uYlhKQ2dHZWUycmlGUmxNUm5UeFM2MWFMZDFBVXF5SnFyay9jaTNCQjZyejVUMnl3dlB6aHhSM1JXCmdrMGxYcW5xVGxHd3pENHVmWmEwNlJpMnZ6YzF0M3BpN3RZNldXeXRiTGRQMUN6L0pHZTBpMzBDZ1lFQTJ1aEMKaFdMUXdtY203YmRyUE80VkJROXZjVlFsVHQ2RDFJd2tHT0VkdkF0SzJJeXZGSWdDUzh0cm9EVG1PazE2NmtINwo3OGdiOGNmOXhEZERaNnpqYjd1R3lrTVQ3SkRxOWlKUTdMdzljaVA4QnVoQUtSejdNcXcvWFk2MnJRcUJQd1NkClZQRFNERVJjMkpqcHhpSUlPeW1ueGdDTTlSbFVQVWRVK3NxK2Zya0NnWUIra3I4Zzl5ZHhpWTJsbEJLaXMvcTEKaUZ3azM3Q21FV2ZoejdZSStIME9QQjBrRnpteUhXT0F2Rjh5SXA5Y1V1SXBNMktMeUwzT3lzWENwbzhVcWZvZwo4QStZa2tHeHJXdFhTNU5ScmkwZldFQ0F5Z1VqZ2M2bTBuUzNDZkMzY21NNFZBR2lyZzFpTk1MdTJkWXhrZE1LCjNWTEkrZzUrMXdVcVVWNmFaL0VKR1FLQmdCQlRzbUp3ZEZHTGtBTzY0bXl3OVRCamJsUnRpanJQcmRWMGZseTgKclpNUTVJd3lNZnkrQ0MzUEJqLzBzaGMzSUN2SXNCbTZPeHRWWnovelB6dkVVVkpNRWttVHB6REZ2a0NOWHF2SgpmbXU4ODFjd2kxaUZxTmFtc2pNd0tiL09RTVdLZXBHVFJKZFZvZmNsc0ludWo5Nlp4TUduMk51UEFCRngrSXljCkFvbEJBb0dBUmdLeUlKa2xocUN4elFUUEJQK0VpM2ZXNzV3NWp5bjQ2N0dqQVQ1NVRkb1VsYWJxTTVPTTJwUkMKWXByMTEyNnZEdkU3VDJWdkcwS1RqRFJoai82YmFnSjE5ZTNqc2twQVZxdGUxM3lGUFk4ZTdaMkNKU1hBUS9FVQpsL2grcnJxb0ozNjNRdVB4eGhCWDRTMkMxRG9ndWlrSHprMW5iNUdCeXN1WjVzeE9RbE09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
`
err := YamlUniversal(externalService)(cluster)
Expect(err).ToNot(HaveOccurred())
// when
stdout, _, err := cluster.ExecWithRetries("", "", "demo-client",
"curl", "-v", "-m", "3", "--fail", "testmtls.mesh")
// then
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(ContainSubstring("HTTP/1.1 200 OK"))
Expect(stdout).To(ContainSubstring("TLSv1.2 Authentication OK!"))
Expect(stdout).To(ContainSubstring("CN=kuma"))
})
}
| [] | [] | [] | [] | [] | go | null | null | |
tests/test_elk.py | import os
import pytest
import elasticsearch
import testspace_colab.elk as elk_module
@pytest.mark.skipif(
"CODESPACES" in os.environ, reason="docker not supported in codespace yet"
)
class TestELKDocker:
@pytest.mark.parametrize("elk_state", [None])
def test_not_instanciated(self, elk_api):
assert elk_api.container is None
assert elk_api.get_health() is None
assert elk_api.available is False
assert elk_api.elastic_search is None
assert elk_api.es_cluster_client is None
with pytest.raises(TimeoutError):
elk_api.wait_for_available(timeout=0.1)
assert elk_api.stop() is None # nothing should happen
def test_tag(self):
elk = elk_module.ELK(elk_tag="1234")
assert elk.elk_docker_tag == "1234"
@pytest.mark.parametrize("elk_state", [None, "running", "stopped"])
def test_start(self, elk_api):
elk_api.start()
assert elk_api.container.status == "running"
assert "cluster_name" in elk_api.get_health()
elk_api.start() # This should do nothing
assert elk_api.available is True
elk_api.wait_for_available(
timeout=0.1
) # should return without triggering an exception
assert isinstance(elk_api.elastic_search, elasticsearch.Elasticsearch)
assert "tagline" in elk_api.elastic_search.info()
@pytest.mark.parametrize("elk_state", ["running"])
def test_stop(self, elk_api):
assert elk_api.container.status == "running"
elk_api.stop()
assert elk_api.container.status == "exited"
assert elk_api.get_health() is None
assert elk_api.available is False
assert elk_api.elastic_search is None
assert elk_api.es_cluster_client is None
| [] | [] | [] | [] | [] | python | 0 | 0 | |
Lib/pydoc.py | #!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
At the Python interactive prompt, calling help(thing) on a Python object
documents the object, and calling help() starts up an interactive
help session.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -n <hostname>" to start an HTTP server with the given
hostname (default: localhost) on the local machine.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. Combine with
the -n and -p options to control the hostname and port used.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import importlib._bootstrap
import importlib._bootstrap_external
import importlib.machinery
import importlib.util
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import sysconfig
import time
import tokenize
import urllib.parse
import warnings
from collections import deque
from reprlib import Repr
from traceback import format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def _findclass(func):
cls = sys.modules.get(func.__module__)
if cls is None:
return None
for name in func.__qualname__.split('.')[:-1]:
cls = getattr(cls, name)
if not inspect.isclass(cls):
return None
return cls
def _finddoc(obj):
if inspect.ismethod(obj):
name = obj.__func__.__name__
self = obj.__self__
if (inspect.isclass(self) and
getattr(getattr(self, name, None), '__func__') is obj.__func__):
# classmethod
cls = self
else:
cls = self.__class__
elif inspect.isfunction(obj):
name = obj.__name__
cls = _findclass(obj)
if cls is None or getattr(cls, name) is not obj:
return None
elif inspect.isbuiltin(obj):
name = obj.__name__
self = obj.__self__
if (inspect.isclass(self) and
self.__qualname__ + '.' + name == obj.__qualname__):
# classmethod
cls = self
else:
cls = self.__class__
# Should be tested before isdatadescriptor().
elif isinstance(obj, property):
func = obj.fget
name = func.__name__
cls = _findclass(func)
if cls is None or getattr(cls, name) is not obj:
return None
elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj):
name = obj.__name__
cls = obj.__objclass__
if getattr(cls, name) is not obj:
return None
if inspect.ismemberdescriptor(obj):
slots = getattr(cls, '__slots__', None)
if isinstance(slots, dict) and name in slots:
return slots[name]
else:
return None
for base in cls.__mro__:
try:
doc = _getowndoc(getattr(base, name))
except AttributeError:
continue
if doc is not None:
return doc
return None
def _getowndoc(obj):
"""Get the documentation string for an object if it is not
inherited from its class."""
try:
doc = object.__getattribute__(obj, '__doc__')
if doc is None:
return None
if obj is not type:
typedoc = type(obj).__doc__
if isinstance(typedoc, str) and typedoc == doc:
return None
return doc
except AttributeError:
return None
def _getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
doc = _getowndoc(object)
if doc is None:
try:
doc = _finddoc(object)
except (AttributeError, TypeError):
return None
if not isinstance(doc, str):
return None
return inspect.cleandoc(doc)
def getdoc(object):
"""Get the doc string or comments for an object."""
result = _getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
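# Illustrative example (not in the original source):
# splitdoc('One line synopsis.\n\nLonger description.') returns
# ('One line synopsis.', 'Longer description.').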
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
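# Illustrative example (not in the original source):
# replace('a-b_c', '-', '+', '_', '.') performs both substitutions in turn,
# giving 'a+b.c'.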
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
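# Illustrative example (not in the original source): cram('hello world', 8) keeps
# two leading and three trailing characters, producing 'he...rld'.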
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
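# Illustrative example (not in the original source):
# stripid('<function f at 0x7f0123456789>') -> '<function f>'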
def _is_bound_method(fn):
"""
Returns True if fn is a bound method, regardless of whether
fn was implemented in Python or in C.
"""
if inspect.ismethod(fn):
return True
if inspect.isbuiltin(fn):
self = getattr(fn, '__self__', None)
return not (inspect.ismodule(self) or (self is None))
return False
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, inspect.isroutine):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
# XXX Remove __initializing__?
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__spec__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
if isinstance(value, property) and value.fset is None:
kind = 'readonly property'
results.append((name, kind, cls, value))
return results
def sort_attributes(attrs, object):
'Sort the attrs list in-place by _fields and then alphabetically by name'
# This allows data descriptors to be ordered according
# to a _fields attribute if present.
fields = getattr(object, '_fields', [])
try:
field_order = {name : i-len(fields) for (i, name) in enumerate(fields)}
except TypeError:
field_order = {}
keyfunc = lambda attr: (field_order.get(attr[0], 0), attr[0])
attrs.sort(key=keyfunc)
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
# Look for binary suffixes first, falling back to source.
if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
loader_cls = importlib.machinery.SourcelessFileLoader
elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
loader_cls = importlib.machinery.ExtensionFileLoader
else:
loader_cls = None
# Now handle the choice.
if loader_cls is None:
# Must be a source file.
try:
file = tokenize.open(filename)
except OSError:
# module can't be opened, so skip it
return None
# text modules can be directly examined
with file:
result = source_synopsis(file)
else:
# Must be a binary module, which has to be imported.
loader = loader_cls('__temp__', filename)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location('__temp__', filename,
loader=loader)
try:
module = importlib._bootstrap._load(spec)
except:
return None
del sys.modules['__temp__']
result = module.__doc__.splitlines()[0] if module.__doc__ else None
# Cache the result.
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = importlib.util.MAGIC_NUMBER
with open(path, 'rb') as file:
is_bytecode = magic == file.read(len(magic))
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
if is_bytecode:
loader = importlib._bootstrap_external.SourcelessFileLoader(name, path)
else:
loader = importlib._bootstrap_external.SourceFileLoader(name, path)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location(name, path, loader=loader)
try:
return importlib._bootstrap._load(spec)
except:
raise ErrorDuringImport(path, sys.exc_info())
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif issubclass(exc, ImportError) and value.name == path:
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"https://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if inspect.isdatadescriptor(object): return self.docdata(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.normcase(basedir)
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith(("http://", "https://")):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
else:
docloc = os.path.join(docloc, object.__name__.lower() + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
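# Rough illustration of HTMLRepr behaviour (outputs are indicative only):
#
#     _r = HTMLRepr()
#     _r.escape('a < b & c')   # -> 'a &lt; b &amp; c'
#     _r.repr('x' * 500)       # -> a cram()-truncated, HTML-escaped repr string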
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
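    # Sketch of markup() in action (object names and output abbreviated,
    # for illustration only):
    #
    #     html.markup('See RFC 2822, PEP 257 or https://python.org for details.')
    #     # -> 'RFC 2822' and 'PEP 257' become links to rfc-editor.org and
    #     #    python.org/dev/peps, the URL becomes a plain hyperlink, and the
    #     #    rest of the text is HTML-escaped.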
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = urllib.parse.quote(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self.docdata(value, name, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.docdata(value, name, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
doc = getdoc(value)
if not doc:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if object is not builtins.object and thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
sort_attributes(attrs, object)
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Readonly properties %s" % tag, attrs,
lambda t: t[1] == 'readonly property')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
decl = ''
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if argspec and argspec != '()':
decl = name + self.escape(argspec) + '\n\n'
doc = getdoc(object)
if decl:
doc = decl + (doc or '')
doc = self.markup(doc, self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if _is_bound_method(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
if (inspect.iscoroutinefunction(object) or
inspect.isasyncgenfunction(object)):
asyncqualifier = 'async '
else:
asyncqualifier = ''
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if cl and inspect.getattr_static(cl, realname, []) is object:
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
argspec = None
if inspect.isroutine(object):
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                    # XXX lambdas won't usually have func_annotations['return']
                    # since the syntax doesn't support it, but it is possible.
                    # So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
if not argspec:
argspec = '(...)'
decl = asyncqualifier + title + self.escape(argspec) + (note and
self.grey('<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
doc = self.markup(getdoc(object), self.preformat)
if doc:
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
docproperty = docdata
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
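    # Overstriking sketch: bold('hi') returns 'h\x08hi\x08i'; pagers such as
    # "less" and "more" render that as bold text, and plain() strips it again.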
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
contents = []
push = contents.append
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if argspec and argspec != '()':
push(name + argspec + '\n')
doc = getdoc(object)
if doc:
push(doc + '\n')
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# List the built-in subclasses, if any:
subclasses = sorted(
(str(cls.__name__) for cls in type.__subclasses__(object)
if not cls.__name__.startswith("_") and cls.__module__ == "builtins"),
key=str.lower
)
no_of_subclasses = len(subclasses)
MAX_SUBCLASSES_TO_DISPLAY = 4
if subclasses:
push("Built-in subclasses:")
for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]:
push(' ' + subclassname)
if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY:
push(' ... and ' +
str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) +
' other subclasses')
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self.docdata(value, name, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.docdata(value, name, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
doc = getdoc(value)
try:
obj = getattr(object, name)
except AttributeError:
obj = homecls.__dict__[name]
push(self.docother(obj, name, mod, maxlen=70, doc=doc) +
'\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if object is not builtins.object and thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
sort_attributes(attrs, object)
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Readonly properties %s:\n" % tag, attrs,
lambda t: t[1] == 'readonly property')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if _is_bound_method(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
if (inspect.iscoroutinefunction(object) or
inspect.isasyncgenfunction(object)):
asyncqualifier = 'async '
else:
asyncqualifier = ''
if name == realname:
title = self.bold(realname)
else:
if cl and inspect.getattr_static(cl, realname, []) is object:
skipdocs = 1
title = self.bold(name) + ' = ' + realname
argspec = None
if inspect.isroutine(object):
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                    # XXX lambdas won't usually have func_annotations['return']
                    # since the syntax doesn't support it, but it is possible.
                    # So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
if not argspec:
argspec = '(...)'
decl = asyncqualifier + title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(object) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
docproperty = docdata
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if not doc:
doc = getdoc(object)
if doc:
line += '\n' + self.indent(str(doc)) + '\n'
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdin, "isatty"):
return plainpager
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
use_pager = os.environ.get('MANPAGER') or os.environ.get('PAGER')
if use_pager:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), use_pager)
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), use_pager)
else:
return lambda text: pipepager(text, use_pager)
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32':
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
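# Pager selection sketch (environment-dependent; shown for illustration only):
#
#     MANPAGER='less -X' python -m pydoc sys   # pipes the page through less -X
#     TERM=dumb python -m pydoc sys            # falls back to plainpager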
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
pipe.write(text)
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass
except OSError:
pass # Ignore broken pipes caused by quitting the pager program.
while True:
try:
proc.wait()
break
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
with open(filename, 'w', errors='backslashreplace') as file:
file.write(text)
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def _escape_stdout(text):
# Escape non-encodable characters to avoid encoding errors later
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
return text.encode(encoding, 'backslashreplace').decode(encoding)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_escape_stdout(text)).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError, io.UnsupportedOperation):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
try:
h = int(os.environ.get('LINES', 0))
except ValueError:
h = 0
if h <= 1:
h = 25
r = inc = h - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(_escape_stdout(text)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
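# Indicative describe() results (exact strings depend on the platform):
#
#     describe(sys)       # -> 'built-in module sys'
#     describe(os.path)   # -> 'module posixpath' (ntpath on Windows)
#     describe(len)       # -> 'built-in function len'
#     describe(str)       # -> 'class str'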
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
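# locate() sketch (dotted paths are illustrative):
#
#     locate('json.decoder.JSONDecoder')   # imports json.decoder, returns the class
#     locate('len')                        # not a module; resolved via builtins
#     locate('no.such.thing')              # -> None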
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if object is None:
raise ImportError('''\
No Python documentation found for %r.
Use help() to get the interactive help utility.
Use help(str) for help on the str class.''' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isdatadescriptor(object) or
_getdoc(object)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
if hasattr(object, '__origin__'):
object = object.__origin__
else:
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
with open(name + '.html', 'w', encoding='utf-8') as file:
file.write(page)
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
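# writedoc() sketch: generate HTML for a module in the current directory
# (module name illustrative):
#
#     import pydoc
#     pydoc.writedoc('json')   # writes ./json.html and prints "wrote json.html"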
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/extensions/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'__peg_parser__': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'async': ('async', ''),
'await': ('await', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')]
_symbols_inverse = {
'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
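    # Dispatch sketch: a help() request string is looked up in these tables,
    # e.g. (illustrative):
    #
    #     help('for')       # keyword -> shows the 'for' topic plus its xrefs
    #     help('LOOPING')   # topic   -> pydoc_data.topics['compound']
    #     help('**')        # symbol  -> resolved through the symbols table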
def __init__(self, input=None, output=None):
self._input = input
self._output = output
@property
def input(self):
return self._input or sys.stdin
@property
def output(self):
return self._output or sys.stdout
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<%s.%s instance>' % (self.__class__.__module__,
self.__class__.__qualname__)
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = request.strip()
# Make sure significant trailing quoting marks of literals don't
# get deleted while cleaning input
if (len(request) > 2 and request[0] == request[-1] in ("'", '"')
and request[0] not in request[1:-1]):
request = request[1:-1]
if request.lower() in ('q', 'quit'): break
if request == 'help':
self.intro()
else:
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
else: doc(str, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python {0}'s help utility!
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at https://docs.python.org/{0}/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, symbols, or topics, type
"modules", "keywords", "symbols", or "topics". Each module also comes
with a one-line summary of what it does; to list the modules whose name
or summary contain a given string such as "spam", type "modules spam".
'''.format('%d.%d' % sys.version_info[:2]))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
doc = doc.strip() + '\n'
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import textwrap
text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n'
wrapped_text = textwrap.wrap(text, 72)
doc += '\n%s\n' % '\n'.join(wrapped_text)
pager(doc)
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of modules whose name or summary contains '{}'.
If there are any, enter a module name to get more help.
'''.format(key))
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose name or summary contain the string "spam".
''')
help = Helper()
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
spec = pkgutil._get_spec(importer, modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
loader = spec.loader
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = importlib._bootstrap._load(spec)
except ImportError:
if onerror:
onerror(modname)
continue
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
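# apropos() sketch: prints one line per module whose name or synopsis matches,
# e.g. (output depends on what is installed):
#
#     apropos('http')
#     # http (package)
#     # http.client - HTTP/1.1 client library
#     # ...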
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, hostname, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
        >>> serverthread = pydoc._start_server(my_url_handler, hostname, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.monotonic()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.monotonic() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, host, port, callback):
self.host = host
self.address = (self.host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, host, port):
self.urlhandler = urlhandler
self.host = host
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.host, self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.join()
# explicitly break a reference cycle: DocServer.callback
# has indirectly a reference to ServerThread.
self.docserver = None
self.serving = False
self.url = None
thread = ServerThread(urlhandler, hostname, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'<[email protected]></font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
def onerror(modname):
pass
ModuleScanner().run(callback, key, onerror=onerror)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic, '#ffffff', '#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
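# Illustrative sketch (not part of the original module): a thin wrapper showing
# how a topic page is fetched through _url_handler above.  The topic name is an
# assumption for the demo; an unknown topic simply comes back as an error page.
def _example_fetch_topic_page(topic='FORMATTING'):
    """Return the rendered HTML for one pydoc topic via the URL handler."""
    # "topic?key=<name>" is routed to html_topicpage() inside get_html_page().
    return _url_handler('topic?key=%s' % topic, 'text/html')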
def browse(port=0, *, open_browser=True, hostname='localhost'):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, hostname, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
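# Illustrative sketch (not part of the original module): serving documentation
# in the background without the interactive command loop of browse().  It uses
# only _start_server and _url_handler defined above; the caller must stop the
# returned thread.
def _example_background_server(port=0, hostname='localhost'):
    thread = _start_server(_url_handler, hostname, port)
    if thread.error:
        raise RuntimeError(thread.error)
    return thread  # later: thread.stop()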
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def _get_revised_path(given_path, argv0):
"""Ensures current directory is on returned path, and argv0 directory is not
Exception: argv0 dir is left alone if it's also pydoc's directory.
Returns a new path entry list, or None if no adjustment is needed.
"""
# Scripts may get the current directory in their path by default if they're
# run with the -m switch, or directly from the current directory.
# The interactive prompt also allows imports from the current directory.
# Accordingly, if the current directory is already present, don't make
# any changes to the given_path
if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:
return None
# Otherwise, add the current directory to the given path, and remove the
# script directory (as long as the latter isn't also pydoc's directory).
stdlib_dir = os.path.dirname(__file__)
script_dir = os.path.dirname(argv0)
revised_path = given_path.copy()
if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):
revised_path.remove(script_dir)
revised_path.insert(0, os.getcwd())
return revised_path
# Note: the tests only cover _get_revised_path, not _adjust_cli_sys_path itself
def _adjust_cli_sys_path():
"""Ensures current directory is on sys.path, and __main__ directory is not.
Exception: __main__ dir is left alone if it's also pydoc's directory.
"""
revised_path = _get_revised_path(sys.path, sys.argv[0])
if revised_path is not None:
sys.path[:] = revised_path
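# Illustrative sketch (not part of the original module): the adjustment
# _get_revised_path makes when a script is run from the current directory and
# only an (invented) stdlib path is already present.  Nothing above is modified.
def _example_path_adjustment():
    given_path = ['/usr/lib/python3/dist-packages']
    argv0 = os.path.join(os.getcwd(), 'pydoc.py')
    # The script directory is not on the given path, so nothing is removed and
    # the current working directory is prepended.
    return _get_revised_path(given_path, argv0)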
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
_adjust_cli_sys_path()
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')
writing = False
start_server = False
open_browser = False
port = 0
hostname = 'localhost'
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if opt == '-n':
start_server = True
hostname = val
if start_server:
browse(port, hostname=hostname, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -n <hostname>
Start an HTTP server with the given hostname (default: localhost).
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. This option can be used in
combination with -n and/or -p.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
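# Illustrative usage (not part of the original module): typical invocations of
# the command-line interface documented in the help text above.
#
#   python -m pydoc json.dumps   # text help for a dotted name
#   python -m pydoc -k http      # search module synopses for "http"
#   python -m pydoc -p 8080      # serve HTML docs on port 8080
#   python -m pydoc -b           # pick a free port and open a browser
#   python -m pydoc -w json      # write json.html to the current directory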
| []
| []
| [
"PAGER",
"PYTHONDOCS",
"LINES",
"TERM",
"MANPAGER"
]
| [] | ["PAGER", "PYTHONDOCS", "LINES", "TERM", "MANPAGER"] | python | 5 | 0 | |
ROS/devel/_setup_util.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfolder name that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
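# Illustrative sketch (not part of the original file): rolling one workspace's
# 'bin' folder out of PATH with the helpers above.  The environment values are
# invented for the demo; on Linux this typically yields the rollback comment
# followed by 'export PATH="/usr/bin"'.
def _example_rollback():
    environ = {
        'CMAKE_PREFIX_PATH': '/opt/ros/indigo',
        'PATH': '/opt/ros/indigo/bin' + os.pathsep + '/usr/bin',
    }
    return rollback_env_variables(environ, {'PATH': 'bin'})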
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in PATHS (each combined with SUBFOLDERS) without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
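# Illustrative sketch (not part of the original file): the prefix computed for
# PKG_CONFIG_PATH when one new workspace is added.  The paths are invented;
# entries already present in the environment stay out of the prefix, and a
# trailing separator is appended because the variable already has a value.
def _example_prefix():
    environ = {'PKG_CONFIG_PATH': '/usr/lib/pkgconfig'}
    paths = ['/opt/ros/indigo']
    return _prefix_env_variable(environ, 'PKG_CONFIG_PATH', paths,
                                [os.path.join('lib', 'pkgconfig')])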
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
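# Illustrative sketch (not part of the original file): the shell fragments the
# three emitters above produce on a non-Windows system.  Values are invented.
def _example_shell_lines():
    return [
        comment('prepend folders of workspaces to environment variables'),
        # -> '# prepend folders of workspaces to environment variables'
        assignment('CMAKE_PREFIX_PATH', '/opt/ros/indigo'),
        # -> 'export CMAKE_PREFIX_PATH="/opt/ros/indigo"'
        prepend({'PATH': '/usr/bin'}, 'PATH', '/opt/ros/indigo/bin' + os.pathsep),
        # -> 'export PATH="/opt/ros/indigo/bin:$PATH"'
    ]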
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
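# Illustrative sketch (not part of the original file): asking find_env_hooks for
# the hooks of two chained workspaces.  The paths are invented; directories that
# do not exist simply contribute no hooks, so the call is safe to run anywhere.
def _example_env_hooks():
    environ = {'CATKIN_SHELL': 'bash'}
    cmake_prefix_path = os.pathsep.join(
        ['/home/user/catkin_ws/devel', '/opt/ros/indigo'])
    return find_env_hooks(environ, cmake_prefix_path)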
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/nicolas/objectorientation/devel;/home/gabriel/object-recogn/devel;/home/gabriel/catkin_ws/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/os/example_test.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os_test
import (
"errors"
"fmt"
"io/fs"
"log"
"os"
"path/filepath"
"time"
)
func ExampleOpenFile() {
f, err := os.OpenFile("notes.txt", os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
func ExampleOpenFile_append() {
// If the file doesn't exist, create it, or append to the file
f, err := os.OpenFile("access.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
if _, err := f.Write([]byte("appended some data\n")); err != nil {
f.Close() // ignore error; Write error takes precedence
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
func ExampleChmod() {
if err := os.Chmod("some-filename", 0644); err != nil {
log.Fatal(err)
}
}
func ExampleChtimes() {
mtime := time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC)
atime := time.Date(2007, time.March, 2, 4, 5, 6, 0, time.UTC)
if err := os.Chtimes("some-filename", atime, mtime); err != nil {
log.Fatal(err)
}
}
func ExampleFileMode() {
fi, err := os.Lstat("some-filename")
if err != nil {
log.Fatal(err)
}
fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc.
switch mode := fi.Mode(); {
case mode.IsRegular():
fmt.Println("regular file")
case mode.IsDir():
fmt.Println("directory")
case mode&fs.ModeSymlink != 0:
fmt.Println("symbolic link")
case mode&fs.ModeNamedPipe != 0:
fmt.Println("named pipe")
}
}
func ExampleErrNotExist() {
filename := "a-nonexistent-file"
if _, err := os.Stat(filename); errors.Is(err, fs.ErrNotExist) {
fmt.Println("file does not exist")
}
// Output:
// file does not exist
}
func ExampleExpand() {
mapper := func(placeholderName string) string {
switch placeholderName {
case "DAY_PART":
return "morning"
case "NAME":
return "Gopher"
}
return ""
}
fmt.Println(os.Expand("Good ${DAY_PART}, $NAME!", mapper))
// Output:
// Good morning, Gopher!
}
func ExampleExpandEnv() {
os.Setenv("NAME", "gopher")
os.Setenv("BURROW", "/usr/gopher")
fmt.Println(os.ExpandEnv("$NAME lives in ${BURROW}."))
// Output:
// gopher lives in /usr/gopher.
}
func ExampleLookupEnv() {
show := func(key string) {
val, ok := os.LookupEnv(key)
if !ok {
fmt.Printf("%s not set\n", key)
} else {
fmt.Printf("%s=%s\n", key, val)
}
}
os.Setenv("SOME_KEY", "value")
os.Setenv("EMPTY_KEY", "")
show("SOME_KEY")
show("EMPTY_KEY")
show("MISSING_KEY")
// Output:
// SOME_KEY=value
// EMPTY_KEY=
// MISSING_KEY not set
}
func ExampleGetenv() {
os.Setenv("NAME", "gopher")
os.Setenv("BURROW", "/usr/gopher")
fmt.Printf("%s lives in %s.\n", os.Getenv("NAME"), os.Getenv("BURROW"))
// Output:
// gopher lives in /usr/gopher.
}
func ExampleUnsetenv() {
os.Setenv("TMPDIR", "/my/tmp")
defer os.Unsetenv("TMPDIR")
}
func ExampleReadDir() {
files, err := os.ReadDir(".")
if err != nil {
log.Fatal(err)
}
for _, file := range files {
fmt.Println(file.Name())
}
}
func ExampleMkdirTemp() {
dir, err := os.MkdirTemp("", "example")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
file := filepath.Join(dir, "tmpfile")
if err := os.WriteFile(file, []byte("content"), 0666); err != nil {
log.Fatal(err)
}
}
func ExampleMkdirTemp_suffix() {
logsDir, err := os.MkdirTemp("", "*-logs")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(logsDir) // clean up
// Logs can be cleaned out earlier if needed by searching
// for all directories whose suffix ends in *-logs.
globPattern := filepath.Join(os.TempDir(), "*-logs")
matches, err := filepath.Glob(globPattern)
if err != nil {
log.Fatalf("Failed to match %q: %v", globPattern, err)
}
for _, match := range matches {
if err := os.RemoveAll(match); err != nil {
log.Printf("Failed to remove %q: %v", match, err)
}
}
}
func ExampleCreateTemp() {
f, err := os.CreateTemp("", "example")
if err != nil {
log.Fatal(err)
}
defer os.Remove(f.Name()) // clean up
if _, err := f.Write([]byte("content")); err != nil {
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
func ExampleCreateTemp_suffix() {
f, err := os.CreateTemp("", "example.*.txt")
if err != nil {
log.Fatal(err)
}
defer os.Remove(f.Name()) // clean up
if _, err := f.Write([]byte("content")); err != nil {
f.Close()
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
func ExampleReadFile() {
data, err := os.ReadFile("testdata/hello")
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(data)
// Output:
// Hello, Gophers!
}
func ExampleWriteFile() {
err := os.WriteFile("testdata/hello", []byte("Hello, Gophers!"), 0666)
if err != nil {
log.Fatal(err)
}
}
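// Illustrative sketch (not part of the original file): falling back to a default
// when an environment variable is unset, combining LookupEnv and Unsetenv shown
// above. The variable name and default value are invented for the demo.
func ExampleLookupEnv_default() {
	os.Unsetenv("TIMEOUT")
	timeout := "30s" // assumed default
	if val, ok := os.LookupEnv("TIMEOUT"); ok {
		timeout = val
	}
	fmt.Println(timeout)
	// Output:
	// 30s
}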
| [
"\"NAME\"",
"\"BURROW\""
]
| []
| [
"BURROW",
"NAME"
]
| [] | ["BURROW", "NAME"] | go | 2 | 0 | |
pkg/kubeutil/kubeutil.go | package kubeutil
import (
"errors"
"fmt"
"os"
"os/exec"
"path"
"strings"
)
import (
homedir "github.com/mitchellh/go-homedir"
)
type GetKubectlFunc func(kubectl *Kubectl, args ...string) (string, error)
type ExecKubectlFunc func(kubectl *Kubectl, args ...string) error
type InKubectlFunc func(kubectl *Kubectl, stdin string, args ...string) error
type GetInKubectlFunc func(kubectl *Kubectl, stdin string, args ...string) (string, error)
var GetKubectl GetKubectlFunc = func(kubectl *Kubectl, args ...string) (string, error) {
osCmd := exec.Command("kubectl", args...)
osCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubectl.KubeconfigPath))
osCmd.Stdin = os.Stdin
osCmd.Stderr = os.Stderr
outputBytes, err := osCmd.Output()
return string(outputBytes), err
}
var ExecKubectl ExecKubectlFunc = func(kubectl *Kubectl, args ...string) error {
osCmd := exec.Command("kubectl", args...)
osCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubectl.KubeconfigPath))
osCmd.Stdin = os.Stdin
osCmd.Stdout = os.Stdout
osCmd.Stderr = os.Stderr
return osCmd.Run()
}
var InKubectl InKubectlFunc = func(kubectl *Kubectl, stdin string, args ...string) error {
osCmd := exec.Command("kubectl", args...)
osCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubectl.KubeconfigPath))
osCmd.Stdin = strings.NewReader(stdin)
osCmd.Stdout = os.Stdout
osCmd.Stderr = os.Stderr
return osCmd.Run()
}
var GetInKubectl GetInKubectlFunc = func(kubectl *Kubectl, stdin string, args ...string) (string, error) {
osCmd := exec.Command("kubectl", args...)
osCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubectl.KubeconfigPath))
osCmd.Stdin = strings.NewReader(stdin)
osCmd.Stderr = os.Stderr
outputBytes, err := osCmd.Output()
return string(outputBytes), err
}
// Get the name by which the cluster recognizes a given host.
func NodeNameFromHost(kubectl *Kubectl, host string) (string, error) {
nodesOutput, err := GetKubectl(kubectl, "get", "nodes", "-o", "custom-columns=NODE:metadata.name,IP:status.addresses[?(@.type=='InternalIP')].address")
if err != nil {
return "", errors.New(strings.Join([]string{nodesOutput, err.Error()}, " "))
}
outputRows := strings.Split(nodesOutput, "\n")
if len(outputRows) < 2 {
return "", errors.New("No nodes found in this cluster")
}
nodeRows := outputRows[1:]
for _, nodeRow := range nodeRows {
if strings.HasPrefix(nodeRow, host) || strings.HasSuffix(nodeRow, host) {
return strings.Split(nodeRow, " ")[0], nil
}
}
return "", fmt.Errorf("Host: %s not found in this cluster", host)
}
func GetKubeConfigPath() (string, error) {
kubeconfigEnv := os.Getenv("KUBECONFIG")
if kubeconfigEnv != "" {
return kubeconfigEnv, nil
}
home, err := homedir.Dir()
if err != nil {
return "", err
}
return path.Join(home, ".kube", "config"), nil
}
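// exampleNodeLookup is an illustrative sketch (not part of the original file):
// it resolves the kubeconfig path and asks the cluster for the node name that
// matches a host IP, using only the helpers defined above. It assumes the
// Kubectl type (defined elsewhere in this package) can be built from just its
// exported KubeconfigPath field.
func exampleNodeLookup(hostIP string) (string, error) {
	kubeconfigPath, err := GetKubeConfigPath()
	if err != nil {
		return "", err
	}
	kubectl := &Kubectl{KubeconfigPath: kubeconfigPath}
	return NodeNameFromHost(kubectl, hostIP)
}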
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
chinadns/dnsrelay.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import socket
import errno
import struct
import logging
info = sys.version_info
if not (info[0] == 2 and info[1] >= 7):
print 'Python 2.7 required'
sys.exit(1)
import argparse
from shadowsocks import eventloop, asyncdns, lru_cache
BUF_SIZE = 16384
CACHE_TIMEOUT = 10
EMPTY_RESULT_DELAY = 4
GFW_LIST = set(["74.125.127.102", "74.125.155.102", "74.125.39.102",
"74.125.39.113", "209.85.229.138", "128.121.126.139",
"159.106.121.75", "169.132.13.103", "192.67.198.6",
"202.106.1.2", "202.181.7.85", "203.161.230.171",
"203.98.7.65", "207.12.88.98", "208.56.31.43",
"209.145.54.50", "209.220.30.174", "209.36.73.33",
"211.94.66.147", "213.169.251.35", "216.221.188.182",
"216.234.179.13", "243.185.187.39", "37.61.54.158",
"4.36.66.178", "46.82.174.68", "59.24.3.173", "64.33.88.161",
"64.33.99.47", "64.66.163.251", "65.104.202.252",
"65.160.219.113", "66.45.252.237", "72.14.205.104",
"72.14.205.99", "78.16.49.15", "8.7.198.45", "93.46.8.89"])
class DNSRelay(object):
def __init__(self, config):
self._loop = None
self._config = config
self._last_time = time.time()
self._local_addr = (config['local_address'], config['local_port'])
self._remote_addrs = []
for addr in config['dns'].split(','):
parts = addr.strip().rsplit(':', 1)
host = parts[0]
port = int(parts[1]) if len(parts) == 2 else 53
self._remote_addrs.append((host, port))
self._remote_addr = self._remote_addrs[-1]
self._hosts = {}
self._parse_hosts()
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
loop.add_handler(self.handle_events)
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if asyncdns.is_ip(ip):
for i in xrange(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
pass
@staticmethod
def build_response(request, ip):
addrs = socket.getaddrinfo(ip, 0, 0, 0, 0)
if not addrs:
return None
af, socktype, proto, canonname, sa = addrs[0]
header = struct.unpack('!HBBHHHH', request[:12])
header = struct.pack('!HBBHHHH', header[0], 0x80 | header[1], 0x80, 1,
1, 0, 0)
if af == socket.AF_INET:
qtype = asyncdns.QTYPE_A
else:
qtype = asyncdns.QTYPE_AAAA
addr = socket.inet_pton(af, ip)
question = request[12:]
# for hostname compression
answer = struct.pack('!H', ((128 + 64) << 8 | 12)) + \
struct.pack('!HHiH', qtype, asyncdns.QCLASS_IN, 300,
len(addr)) + addr
return header + question + answer
def handle_events(self, events):
pass
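# Illustrative sketch (not part of the original file): the config dict that
# DNSRelay and its subclasses expect; these are the same keys main() below
# builds from argparse.  The values here are invented for the demo.
_EXAMPLE_CONFIG = {
    'local_address': '127.0.0.1',
    'local_port': 5353,
    'dns': '114.114.114.114,8.8.8.8:53',
}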
class UDPDNSRelay(DNSRelay):
def __init__(self, config):
DNSRelay.__init__(self, config)
self._id_to_addr = lru_cache.LRUCache(CACHE_TIMEOUT)
self._local_sock = None
self._remote_sock = None
self._create_sockets()
self._pending_responses = []
def _create_sockets(self):
sockets = []
for addr in (self._local_addr, self._remote_addr):
addrs = socket.getaddrinfo(addr[0], addr[1], 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" % addr)
af, socktype, proto, canonname, sa = addrs[0]
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
sockets.append(sock)
self._local_sock, self._remote_sock = sockets
self._local_sock.bind(self._local_addr)
def _rebuild_sockets(self):
self._id_to_addr.clear()
self._loop.remove(self._local_sock)
self._loop.remove(self._remote_sock)
self._local_sock.close()
self._remote_sock.close()
self._create_sockets()
self._loop.add(self._local_sock, eventloop.POLL_IN)
self._loop.add(self._remote_sock, eventloop.POLL_IN)
def add_to_loop(self, loop):
DNSRelay.add_to_loop(self, loop)
loop.add(self._local_sock, eventloop.POLL_IN)
loop.add(self._remote_sock, eventloop.POLL_IN)
def _handle_local(self, sock):
try:
data, addr = sock.recvfrom(BUF_SIZE)
except (OSError, IOError) as e:
logging.error(e)
if eventloop.errno_from_exception(e) == errno.ECONNRESET:
# just for Windows lol
self._rebuild_sockets()
return
header = asyncdns.parse_header(data)
if header:
try:
req_id = header[0]
req = asyncdns.parse_response(data)
logging.info('request %s', req.hostname)
if req.hostname in self._hosts:
response = self.build_response(data,
self._hosts[req.hostname])
if response:
logging.info('%s hit /etc/hosts', req.hostname)
self._local_sock.sendto(response, addr)
return
self._id_to_addr[req_id] = addr
for remote_addr in self._remote_addrs:
self._remote_sock.sendto(data, remote_addr)
except Exception as e:
import traceback
traceback.print_exc()
logging.error(e)
def _handle_remote(self, sock):
try:
data, addr = sock.recvfrom(BUF_SIZE)
except (OSError, IOError) as e:
logging.error(e)
if eventloop.errno_from_exception(e) == errno.ECONNRESET:
# just for Windows lol
self._rebuild_sockets()
return
if data:
try:
header = asyncdns.parse_header(data)
if header:
req_id = header[0]
res = asyncdns.parse_response(data)
logging.info('response from %s:%d %s', addr[0], addr[1],
res)
addr = self._id_to_addr.get(req_id, None)
if addr:
for answer in res.answers:
if answer and answer[0] in GFW_LIST:
return
if not res.answers:
# delay empty results
def _send_later():
self._local_sock.sendto(data, addr)
self._pending_responses.append((time.time(),
_send_later))
return
self._local_sock.sendto(data, addr)
del self._id_to_addr[req_id]
except Exception as e:
import traceback
traceback.print_exc()
logging.error(e)
if eventloop.errno_from_exception(e) == errno.EACCES:
# when we have changed our ip
self._rebuild_sockets()
def handle_events(self, events):
for sock, fd, event in events:
if sock == self._local_sock:
self._handle_local(sock)
elif sock == self._remote_sock:
self._handle_remote(sock)
now = time.time()
if now - self._last_time > CACHE_TIMEOUT / 2:
self._id_to_addr.sweep()
i = 0
for pending_response in self._pending_responses:
ts, cb = pending_response
if now - ts > EMPTY_RESULT_DELAY:
cb()
i += 1
else:
break
self._pending_responses = self._pending_responses[i:]
class TCPDNSRelay(DNSRelay):
def __init__(self, config):
DNSRelay.__init__(self, config)
self._local_to_remote = {}
self._remote_to_local = {}
addrs = socket.getaddrinfo(self._local_addr[0], self._local_addr[1], 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" % self._local_addr)
af, socktype, proto, canonname, sa = addrs[0]
self._listen_sock = socket.socket(af, socktype, proto)
self._listen_sock.setblocking(False)
self._listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._listen_sock.bind(self._local_addr)
self._listen_sock.listen(1024)
def _handle_conn(self, sock):
try:
local, addr = sock.accept()
addrs = socket.getaddrinfo(self._remote_addr[0],
self._remote_addr[1], 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
self._remote_addr)
af, socktype, proto, canonname, sa = addrs[0]
remote = socket.socket(af, socktype, proto)
local.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
remote.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self._local_to_remote[local] = remote
self._remote_to_local[remote] = local
self._loop.add(local, 0)
self._loop.add(remote, eventloop.POLL_OUT)
try:
remote.connect(self._remote_addr)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in (errno.EINPROGRESS,
errno.EAGAIN):
pass
else:
raise
except (OSError, IOError) as e:
logging.error(e)
def _destroy(self, local, remote):
if local in self._local_to_remote:
self._loop.remove(local)
self._loop.remove(remote)
del self._local_to_remote[local]
del self._remote_to_local[remote]
local.close()
remote.close()
else:
logging.error('already destroyed')
def _handle_local(self, local, event):
remote = self._local_to_remote[local]
if event & (eventloop.POLL_ERR | eventloop.POLL_HUP):
self._destroy(local, remote)
elif event & eventloop.POLL_IN:
try:
data = local.recv(BUF_SIZE)
if not data:
self._destroy(local, remote)
else:
remote.send(data)
except (OSError, IOError) as e:
self._destroy(local, self._local_to_remote[local])
logging.error(e)
def _handle_remote(self, remote, event):
local = self._remote_to_local[remote]
if event & (eventloop.POLL_ERR | eventloop.POLL_HUP):
self._destroy(local, remote)
elif event & eventloop.POLL_OUT:
self._loop.modify(remote, eventloop.POLL_IN)
self._loop.modify(local, eventloop.POLL_IN)
elif event & eventloop.POLL_IN:
try:
data = remote.recv(BUF_SIZE)
if not data:
self._destroy(local, remote)
else:
try:
res = asyncdns.parse_response(data[2:])
if res:
logging.info('response %s', res)
except Exception as e:
logging.error(e)
local.send(data)
except (OSError, IOError) as e:
self._destroy(local, remote)
logging.error(e)
def add_to_loop(self, loop):
DNSRelay.add_to_loop(self, loop)
loop.add(self._listen_sock, eventloop.POLL_IN)
def handle_events(self, events):
for sock, fd, event in events:
if sock == self._listen_sock:
self._handle_conn(sock)
elif sock in self._local_to_remote:
self._handle_local(sock, event)
elif sock in self._remote_to_local:
self._handle_remote(sock, event)
# TODO implement timeout
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
parser = argparse.ArgumentParser(description='Forward DNS requests.')
parser.add_argument('-b', '--local_address', metavar='BIND_ADDR', type=str,
help='address that listens, default: 127.0.0.1',
default='127.0.0.1')
parser.add_argument('-p', '--local_port', metavar='BIND_PORT', type=int,
help='port that listens, default: 53', default=53)
parser.add_argument('-s', '--dns', metavar='DNS', type=str,
help='DNS server to use, default: '
'114.114.114.114,208.67.222.222,8.8.8.8',
default='114.114.114.114,208.67.222.222,8.8.8.8')
parser.add_argument('-l', '--ip_list', metavar='IP_LIST_FILE', type=str,
default=None)
config = vars(parser.parse_args())
if config['ip_list']:
logging.info('loading IP list from %s', config['ip_list'])
with open(config['ip_list'], 'rb') as f:
global GFW_LIST
GFW_LIST = set(f.readlines())
logging.info("starting dns at %s:%d",
config['local_address'], config['local_port'])
loop = eventloop.EventLoop()
try:
udprelay = UDPDNSRelay(config)
udprelay.add_to_loop(loop)
tcprelay = TCPDNSRelay(config)
tcprelay.add_to_loop(loop)
loop.run()
except (OSError, IOError) as e:
logging.error(e)
if eventloop.errno_from_exception(e) == errno.EACCES:
logging.info('please use sudo to run this program')
sys.exit(1)
if __name__ == '__main__':
main()
| []
| []
| [
"WINDIR"
]
| [] | ["WINDIR"] | python | 1 | 0 | |
aws/solutions/StackSetsResource/FunctionCode/lambda_function.py | """
StackSet via CloudFormation
"""
# Next ToDo:
# Allow for stack Retention
import boto3
from time import sleep
from botocore.exceptions import ClientError
import os
import crhelper
# initialise logger
logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"})
logger.info('Logging configured')
# set global to track init failures
init_failed = False
try:
# Place initialization code here
logger.info("Container initialization completed")
except Exception as e:
logger.error(e, exc_info=True)
init_failed = e
def get_stack_from_arn(arn):
# Given the ARN of a CloudFormation stack, return the stack name
(arn, partion, service, region, account, resourcepart) = arn.split(':', 5)
if ':' in resourcepart:
(resourcetype, resource) = resourcepart.split(':')
elif '/' in resourcepart:
resourceparts = resourcepart.split('/')
resource = resourceparts[1]
else:
(resource) = resourcepart
return(resource)
def change_requires_update(attributes, old_values, current_values):
# Given a list of attributes, compare the old and new values to see if
# there's been a change.
for attribute in attributes:
if (attribute not in old_values) and (attribute in current_values):
logger.debug("New value for %s: %s" % (attribute, current_values[attribute]))
return True
if (attribute in old_values) and (attribute not in current_values):
logger.debug("Value removed for %s: %s" % (attribute, old_values[attribute]))
return True
if (attribute in old_values) and (attribute in current_values):
logger.debug("Evaluating %s: %s vs. %s" % (attribute, current_values[attribute], old_values[attribute]))
if current_values[attribute] != old_values[attribute]:
return True
return False
def convert_ops_prefs(ops_prefs):
# CloudFormation parameters are all strings. We need to convert numeric
# values in the ops_prefs JSON object to ints before we can call the API
logger.info("Converting Operation Preferences values")
converted_ops_prefs = {}
needs_conversion = set(['FailureToleranceCount', 'FailureTolerancePercentage', 'MaxConcurrentCount', 'MaxConcurrentPercentage'])
for key, value in ops_prefs.items():
logger.debug("Evaluating %s : %s" % (key, value))
if key in needs_conversion:
logger.debug("Converting %s" % key)
converted_ops_prefs[key] = int(value)
elif key == 'RegionOrder':
converted_ops_prefs['RegionOrder'] = value
else:
logger.warning("Warning: Skipping unknown key: %s in Operation Preferences" % key)
return(converted_ops_prefs)
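# Illustrative sketch (not part of the original file): what convert_ops_prefs
# produces for the all-string preferences CloudFormation passes in.  The input
# values are invented for the demo.
def _example_ops_prefs():
    raw = {'MaxConcurrentCount': '2',
           'FailureToleranceCount': '0',
           'RegionOrder': ['us-east-1', 'us-west-2']}
    # -> {'MaxConcurrentCount': 2, 'FailureToleranceCount': 0,
    #     'RegionOrder': ['us-east-1', 'us-west-2']}
    return convert_ops_prefs(raw)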
def expand_tags(tags):
# We get the Tags as a list of key, value pairs, but CloudFormation needs
# them exploded out to Key: key, Value: value.
tags_array = []
for tag in tags:
logger.debug(tag)
key, value = list(tag.items())[0]
tags_array.append({'Key': key, 'Value': value})
return(tags_array)
def expand_parameters(params):
# We get the Parameters as a list of key, value pairs, but CloudFormation
# needs them exploded out to ParameterKey: key, ParameterValue: value.
params_array = []
for param in params:
logger.debug(param)
key, value = list(param.items())[0]
params_array.append({'ParameterKey': key, 'ParameterValue': value})
return(params_array)
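# Illustrative sketch (not part of the original file): how the template's list
# of single-pair dicts is exploded into the shape the CloudFormation API wants.
# The tag and parameter values are invented for the demo.
def _example_expand():
    tags = expand_tags([{'Team': 'platform'}])
    # -> [{'Key': 'Team', 'Value': 'platform'}]
    params = expand_parameters([{'VpcId': 'vpc-12345'}])
    # -> [{'ParameterKey': 'VpcId', 'ParameterValue': 'vpc-12345'}]
    return tags, params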
def flatten_stacks(stackinstances):
# Stack instances are defined across accounts and regions + parameter
# overrides. We want to expand all combinations before we take action.
flat_stacks = {}
for instance in stackinstances:
for account in instance['Accounts']:
for region in instance['Regions']:
tuple = ("%s/%s" % (account, region))
if tuple in flat_stacks:
raise Exception("%s / %s is defined multiple times" % (account, region))
if 'ParameterOverrides' in instance:
flat_stacks[tuple] = instance['ParameterOverrides']
else:
flat_stacks[tuple] = []
return(flat_stacks)
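# Illustrative sketch (not part of the original file): flattening one stack
# instance definition into per-account/region tuples.  Account IDs and the
# override are invented for the demo.
def _example_flatten():
    instances = [{'Accounts': ['111111111111', '222222222222'],
                  'Regions': ['us-east-1'],
                  'ParameterOverrides': [{'Stage': 'prod'}]}]
    # -> {'111111111111/us-east-1': [{'Stage': 'prod'}],
    #     '222222222222/us-east-1': [{'Stage': 'prod'}]}
    return flatten_stacks(instances)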
def group_by_account(set, flat_stacks):
# Group regions by account and overrides
grouped_accounts = {}
for instance in set:
account, region = instance.split('/')
if account in grouped_accounts:
if flat_stacks[instance] == grouped_accounts[account]['overrides']:
grouped_accounts[account]['regions'].append(region)
else:
raise Exception("The overrides didn't match account group for %s" % instance)
else:
grouped_accounts[account] = {'regions': [region],
'overrides': flat_stacks[instance]}
return(grouped_accounts)
def aggregate_instances(account_list, flat_stacks):
# First group regions by account and overrides
accounts = group_by_account(account_list, flat_stacks)
# Aggregate accounts into instances with similar regions to reduce number
# of API calls
instances = []
while accounts.keys():
instance = {}
aggregated_accounts = []
(source_account, values) = accounts.popitem()
for account in accounts:
if accounts[account] == values:
aggregated_accounts.append(account)
for account in aggregated_accounts:
accounts.pop(account)
aggregated_accounts.append(source_account)
instance = {'accounts': aggregated_accounts,
'regions': values['regions'],
'overrides': values['overrides']}
instances.append(instance)
logger.debug(instances)
return(instances)
def launch_stacks(set_region, set_name, accts, regions, param_overrides,
ops_prefs):
# Wrapper for create_stack_instances
sleep_time = 15
retries = 20
this_try = 0
logger.info("Creating stacks with op prefs %s" % ops_prefs)
logger.debug("StackSetName: %s, Accounts: %s, Regions: %s, ParameterOverrides: %s" % (set_name, accts, regions, param_overrides))
while True:
try:
client = boto3.client('cloudformation', region_name=set_region)
response = client.create_stack_instances(
StackSetName=set_name,
Accounts=accts,
Regions=regions,
ParameterOverrides=param_overrides,
OperationPreferences=ops_prefs,
# OperationId='string'
)
return(response)
except ClientError as e:
if e.response['Error']['Code'] == 'OperationInProgressException':
this_try += 1
if this_try == retries:
return("Failed to launch stacks after %s tries" % this_try)
else:
logger.warning("Operation in progress for %s in %s. Sleeping for %i seconds." % (set_name, set_region, sleep_time))
sleep(sleep_time)
continue
elif e.response['Error']['Code'] == 'StackSetNotFoundException':
raise Exception("No StackSet matching %s found in %s. You must create before launching stacks." % (set_name, set_region))
else:
raise Exception("Error launching stack instance: %s" % e)
def update_stacks(set_region, set_name, accts, regions, param_overrides,
ops_prefs):
# Wrapper for update_stack_instances
sleep_time = 15
retries = 20
this_try = 0
logger.info("Updating stacks with op prefs %s" % ops_prefs)
# UpdateStackInstance only allows stackSetName, not stackSetId,
# so we need to truncate.
(set_name, uid) = set_name.split(':')
logger.debug("StackSetName: %s, Accounts: %s, Regions: %s, ParameterOverrides: %s" % (set_name, accts, regions, param_overrides))
while True:
try:
client = boto3.client('cloudformation', region_name=set_region)
response = client.update_stack_instances(
StackSetName=set_name,
Accounts=accts,
Regions=regions,
ParameterOverrides=param_overrides,
OperationPreferences=ops_prefs,
# OperationId='string'
)
return(response)
except ClientError as e:
if e.response['Error']['Code'] == 'OperationInProgressException':
this_try += 1
if this_try == retries:
return("Failed to update stacks after %s tries" % this_try)
else:
logger.warning("Operation in progress for %s in %s. Sleeping for %i seconds." % (set_name, set_region, sleep_time))
sleep(sleep_time)
continue
elif e.response['Error']['Code'] == 'StackSetNotFoundException':
raise Exception("No StackSet matching %s found in %s. You must create before launching stacks." % (set_name, set_region))
else:
raise Exception("Unexpected error: %s" % e)
def delete_stacks(set_region, set_id, accts, regions, ops_prefs):
# Wrapper for delete_stack_instances
sleep_time = 15
retries = 20
this_try = 0
logger.info("Deleting stacks with op prefs %s" % ops_prefs)
logger.debug("StackSetName: %s, Accounts: %s, Regions: %s" % (set_id, accts, regions))
while True:
try:
client = boto3.client('cloudformation', region_name=set_region)
response = client.delete_stack_instances(
StackSetName=set_id,
Accounts=accts,
Regions=regions,
OperationPreferences=ops_prefs,
RetainStacks=False,
# OperationId='string'
)
return(response)
except ClientError as e:
if e.response['Error']['Code'] == 'OperationInProgressException':
this_try += 1
if this_try == retries:
return("Failed to delete stacks after %s tries" % this_try)
else:
logger.warning("Operation in progress for %s in %s. Sleeping for %i seconds." % (set_id, set_region, sleep_time))
sleep(sleep_time)
continue
elif e.response['Error']['Code'] == 'StackSetNotFoundException':
return("No StackSet matching %s found in %s. You must create before launching stacks." % (set_id, set_region))
else:
return("Unexpected error: %s" % e)
def update_stack_set(set_region, set_id, set_description, set_template,
set_parameters, set_capabilities, set_tags, ops_prefs):
# Set up for retries
sleep_time = 15
retries = 20
this_try = 0
client = boto3.client('cloudformation', region_name=set_region)
# Retry loop
while True:
try:
response = client.update_stack_set(
StackSetName=set_id,
Description=set_description,
TemplateURL=set_template,
# TemplateBody='string',
# UsePreviousTemplate=True|False,
Parameters=set_parameters,
Capabilities=set_capabilities,
Tags=set_tags,
OperationPreferences=ops_prefs
# OperationId='string'
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return set_id
else:
raise Exception("HTTP Error: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] == 'OperationInProgressException':
this_try += 1
if this_try == retries:
raise Exception("Failed to delete StackSet after %s tries." % this_try)
else:
logger.warning("Operation in progress for %s. Sleeping for %i seconds." % (set_id, sleep_time))
sleep(sleep_time)
continue
elif e.response['Error']['Code'] == 'StackSetNotEmptyException':
raise Exception("There are still stacks in set %s. You must delete these first." % (set_id))
else:
raise Exception("Unexpected error: %s" % e)
def create(event, context):
"""
Handle StackSetResource CREATE events.
Create StackSet resource and any stack instances specified in the template.
"""
# Collect everything we need to create the stack set
# optional
if 'StackSetName' in event['ResourceProperties']:
set_name = event['ResourceProperties']['StackSetName']
else:
set_name = "%s-%s" % (get_stack_from_arn(event['StackId']), event['LogicalResourceId'])
if 'StackSetDescription' in event['ResourceProperties']:
set_description = event['ResourceProperties']['StackSetDescription']
else:
set_description = "This StackSet belongs to the CloudFormation stack %s." % get_stack_from_arn(event['StackId'])
if 'OperationPreferences' in event['ResourceProperties']:
set_opsprefs = convert_ops_prefs(event['ResourceProperties']['OperationPreferences'])
else:
set_opsprefs = {}
if 'Tags' in event['ResourceProperties']:
set_tags = expand_tags(event['ResourceProperties']['Tags'])
else:
set_tags = []
if 'Capabilities' in event['ResourceProperties']:
set_capabilities = event['ResourceProperties']['Capabilities']
else:
set_capabilities = ''
if 'AdministrationRoleARN' in event['ResourceProperties']:
set_admin_role_arn = event['ResourceProperties']['AdministrationRoleARN']
else:
set_admin_role_arn = ''
if 'ExecutionRoleName' in event['ResourceProperties']:
set_exec_role_name = event['ResourceProperties']['ExecutionRoleName']
else:
set_exec_role_name = ''
if 'Parameters' in event['ResourceProperties']:
set_parameters = expand_parameters(event['ResourceProperties']['Parameters'])
else:
set_parameters = []
# Required
set_template = event['ResourceProperties']['TemplateURL']
# Create the StackSet
try:
client = boto3.client('cloudformation',
region_name=os.environ['AWS_REGION'])
response = client.create_stack_set(
StackSetName=set_name,
Description=set_description,
TemplateURL=set_template,
# TemplateBody='string',
Parameters=set_parameters,
Capabilities=set_capabilities,
Tags=set_tags,
AdministrationRoleARN=set_admin_role_arn,
ExecutionRoleName=set_exec_role_name
# ClientRequestToken='string'
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
set_id = response['StackSetId']
else:
raise Exception("HTTP Error: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] == 'NameAlreadyExistsException':
raise Exception("A StackSet called %s already exists." % set_name)
else:
raise Exception("Unexpected error: %s" % e)
logger.info("Created StackSet: %s" % set_id)
physical_resource_id = set_id
# Deploy stack to accounts and regions if defined.
# We're going to switch from a single stack instance definition to an array
# of stack instance objects. This will allow more complex stack structures
# across accounts and regions, including parameter overrides
# Iterate over stack instances
for instance in event['ResourceProperties']['StackInstances']:
if 'ParameterOverrides' in instance:
param_overrides = expand_parameters(instance['ParameterOverrides'])
else:
param_overrides = []
logger.debug("Stack Instance: Regions: %s : Accounts: %s : Parameters: %s" % (instance['Regions'], instance['Accounts'], param_overrides))
# Make sure every stack instance defines both a list of accounts and
# a list of regions
if instance['Regions'][0] != '' and instance['Accounts'][0] == '':
raise Exception("You must specify at least one account with a list of regions.")
elif instance['Regions'][0] == '' and instance['Accounts'][0] != '':
raise Exception("You must specify at least one region with a list of accounts.")
elif instance['Regions'][0] != '' and instance['Accounts'][0] != '':
logger.info("Launching stacks in accounts: %s and regions: %s" % (instance['Accounts'], instance['Regions']))
response = launch_stacks(
os.environ['AWS_REGION'],
set_id,
instance['Accounts'],
instance['Regions'],
param_overrides,
set_opsprefs
)
logger.debug(response)
response_data = {}
return physical_resource_id, response_data
def update(event, context):
"""
Handle StackSetResource UPDATE events.
Update StackSet resource and/or any stack instances specified in the template.
"""
# Collect everything we need to update the stack set
set_id = event['PhysicalResourceId']
# Process the Operational Preferences (if any)
if 'OperationPreferences' in event['ResourceProperties']:
set_opsprefs = convert_ops_prefs(event['ResourceProperties']['OperationPreferences'])
else:
set_opsprefs = {}
logger.debug("OperationPreferences: %s" % set_opsprefs)
# Circumstances under which we update the StackSet itself
stack_set_attributes = [
'TemplateURL',
'Parameters',
'Tags',
'Capabilities',
'StackSetDescription'
]
stack_set_needs_update = change_requires_update(stack_set_attributes,
event['OldResourceProperties'],
event['ResourceProperties'])
if stack_set_needs_update:
logger.info("Changes impacting StackSet detected")
# Optional properties
logger.info("Evaluating optional properties")
if 'StackSetDescription' in event['ResourceProperties']:
set_description = event['ResourceProperties']['StackSetDescription']
elif 'StackSetDescription' in event['OldResourceProperties']:
set_description = event['OldResourceProperties']['StackSetDescription']
else:
set_description = "This StackSet belongs to the CloudFormation stack %s." % get_stack_from_arn(event['StackId'])
logger.debug("StackSetDescription: %s" % set_description)
if 'Capabilities' in event['ResourceProperties']:
set_capabilities = event['ResourceProperties']['Capabilities']
elif 'Capabilities' in event['OldResourceProperties']:
set_capabilities = event['OldResourceProperties']['Capabilities']
else:
set_capabilities = []
logger.debug("Capabilities: %s" % set_capabilities)
if 'Tags' in event['ResourceProperties']:
set_tags = expand_tags(event['ResourceProperties']['Tags'])
elif 'Tags' in event['OldResourceProperties']:
set_tags = expand_tags(event['OldResourceProperties']['Tags'])
else:
set_tags = []
logger.debug("Tags: %s" % set_tags)
if 'Parameters' in event['ResourceProperties']:
set_parameters = expand_parameters(event['ResourceProperties']['Parameters'])
elif 'Parameters' in event['OldResourceProperties']:
set_parameters = expand_parameters(event['OldResourceProperties']['Parameters'])
else:
set_parameters = []
logger.debug("Parameters: %s" % set_parameters)
# Required properties
logger.info("Evaluating required properties")
if 'TemplateURL' in event['ResourceProperties']:
set_template = event['ResourceProperties']['TemplateURL']
elif 'TemplateURL' in event['OldResourceProperties']:
set_template = event['OldResourceProperties']['TemplateURL']
else:
raise Exception('Template URL not found during update event')
logger.debug("TemplateURL: %s" % set_template)
# Update the StackSet
logger.info("Updating StackSet resource %s" % set_id)
update_stack_set(os.environ['AWS_REGION'], set_id, set_description,
set_template, set_parameters, set_capabilities,
set_tags, set_opsprefs)
# Now, look for changes to stack instances
logger.info("Evaluating stack instances")
# Flatten all the account/region tuples to compare differences
if 'StackInstances' in event['ResourceProperties']:
new_stacks = flatten_stacks(event['ResourceProperties']['StackInstances'])
else:
new_stacks = []
if 'StackInstances' in event['OldResourceProperties']:
old_stacks = flatten_stacks(event['OldResourceProperties']['StackInstances'])
else:
old_stacks = []
# Evaluate all differences we need to handle
to_add = list(set(new_stacks) - set(old_stacks))
to_delete = list(set(old_stacks) - set(new_stacks))
to_compare = list(set(old_stacks).intersection(new_stacks))
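    # For example (hypothetical data), assuming flatten_stacks returns a mapping keyed
    # by (account, region) tuples, as the comparisons below imply:
    #   old keys: {("111111111111", "us-east-1"), ("111111111111", "us-west-2")}
    #   new keys: {("111111111111", "us-east-1"), ("222222222222", "us-east-1")}
    #   to_add     -> [("222222222222", "us-east-1")]   (launch new stack instances)
    #   to_delete  -> [("111111111111", "us-west-2")]   (remove old stack instances)
    #   to_compare -> [("111111111111", "us-east-1")]   (check for override changes)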
# Launch all new stack instances
if to_add:
logger.info("Adding stack instances: %s" % to_add)
# Aggregate accounts with similar regions to reduce number of API calls
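        # For example (hypothetical), ("111111111111", "us-east-1") and
        # ("222222222222", "us-east-1") with identical overrides can be collapsed into a
        # single call covering both accounts in us-east-1.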
add_instances = aggregate_instances(to_add, new_stacks)
# Add stack instances
for instance in add_instances:
logger.debug("Add aggregated accounts: %s and regions: %s and overrides: %s" % (instance['accounts'], instance['regions'], instance['overrides']))
if 'overrides' in instance:
param_overrides = expand_parameters(instance['overrides'])
else:
param_overrides = []
response = launch_stacks(
os.environ['AWS_REGION'],
set_id,
instance['accounts'],
instance['regions'],
param_overrides,
set_opsprefs
)
logger.debug(response)
# Delete all old stack instances
if to_delete:
logger.info("Deleting stack instances: %s" % to_delete)
# Aggregate accounts with similar regions to reduce number of API calls
delete_instances = aggregate_instances(to_delete, old_stacks)
        # Delete stack instances
for instance in delete_instances:
logger.debug("Delete aggregated accounts: %s and regions: %s" % (instance['accounts'], instance['regions']))
response = delete_stacks(
os.environ['AWS_REGION'],
set_id,
instance['accounts'],
instance['regions'],
set_opsprefs
)
logger.debug(response)
# Determine if any existing instances need to be updated
if to_compare:
logger.info("Examining stack instances: %s" % to_compare)
# Update any stacks in both lists, but with new overrides
to_update = []
for instance in to_compare:
if old_stacks[instance] == new_stacks[instance]:
logger.debug("%s: SAME!" % instance)
else:
logger.debug("%s: DIFFERENT!" % instance)
to_update.append(instance)
# Aggregate accounts with similar regions to reduce number of API calls
update_instances = aggregate_instances(to_update, new_stacks)
for instance in update_instances:
logger.debug("Update aggregated accounts: %s and regions: %s with overrides %s" % (instance['accounts'], instance['regions'], instance['overrides']))
if 'overrides' in instance:
param_overrides = expand_parameters(instance['overrides'])
else:
param_overrides = []
response = update_stacks(
os.environ['AWS_REGION'],
set_id,
instance['accounts'],
instance['regions'],
param_overrides,
set_opsprefs
)
logger.debug(response)
physical_resource_id = set_id
response_data = {}
return physical_resource_id, response_data
def delete(event, context):
"""
Handle StackSetResource DELETE events.
Delete StackSet resource and any stack instances specified in the template.
"""
# Set up for retries
sleep_time = 15
retries = 20
this_try = 0
# Collect everything we need to delete the stack set
set_id = event['PhysicalResourceId']
if set_id == 'NONE':
# This is a rollback from a failed create. Nothing to do.
return
# First, we need to tear down all of the stacks associated with this
# stack set
if 'StackInstances' in event['ResourceProperties']:
# Check for Operation Preferences
if 'OperationPreferences' in event['ResourceProperties']:
set_opsprefs = convert_ops_prefs(event['ResourceProperties']['OperationPreferences'])
else:
set_opsprefs = {}
# Iterate over stack instances
for instance in event['ResourceProperties']['StackInstances']:
logger.debug("Stack Instance: Regions: %s : Accounts: %s" % (instance['Regions'], instance['Accounts']))
logger.info("Removing existing stacks from stack set %s" % set_id)
response = delete_stacks(
os.environ['AWS_REGION'],
set_id,
instance['Accounts'],
instance['Regions'],
set_opsprefs
)
logger.debug(response)
client = boto3.client('cloudformation',
region_name=os.environ['AWS_REGION'])
# Retry loop
logger.info('Deleting stack set')
while True:
try:
response = client.delete_stack_set(
StackSetName=set_id
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return
else:
raise Exception("HTTP Error: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] == 'OperationInProgressException':
this_try += 1
if this_try == retries:
raise Exception("Failed to delete StackSet after %s tries." % this_try)
else:
logger.warning("Operation in progress for %s. Sleeping for %i seconds." % (set_id, sleep_time))
sleep(sleep_time)
continue
elif e.response['Error']['Code'] == 'StackSetNotEmptyException':
raise Exception("There are still stacks in set %s. You must delete these first." % (set_id))
else:
raise Exception("Unexpected error: %s" % e)
def handler(event, context):
"""
    Main handler function, passes off its work to crhelper's cfn_handler
"""
# update the logger with event info
global logger
logger = crhelper.log_config(event)
return crhelper.cfn_handler(event, context, create, update, delete, logger,
init_failed)
| []
| []
| [
"AWS_REGION"
]
| [] | ["AWS_REGION"] | python | 1 | 0 | |
main_dynamic_response_generation.py | import os
import argparse
from prompts.generic_prompt import evalute_ppl, generate_response_dynamic
from prompts.bAbi_dialogue import convert_sample_to_shot_bAbi
from metric.scorer_parse import score
from utils.utils import load_model, save_file, checker_file
os.environ["TOKENIZERS_PARALLELISM"] = "false"
mapper = {
"babi5": {"shot_converter":convert_sample_to_shot_bAbi,
"shot_converter_inference": convert_sample_to_shot_bAbi,
"file_data":"data/dialog-bAbI-tasks/bAbI-dial-5-","with_knowledge":None,
"shots":{1024:[0,1,2],2048:[1,0,1,2]},"shot_separator":"\n\n",
"meta_type":"alldynamic","gen_len":50,"max_number_turns":3},
"babi5-OOV": {"shot_converter":convert_sample_to_shot_bAbi,
"shot_converter_inference": convert_sample_to_shot_bAbi,
"file_data":"data/dialog-bAbI-tasks/bAbI-dial-5-OOV-","with_knowledge":None,
"shots":{1024:[0,1,2],2048:[0,1,2]},"shot_separator":"\n\n",
"meta_type":"alldynamic","gen_len":50,"max_number_turns":3},
"babi6": {"shot_converter":convert_sample_to_shot_bAbi,
"shot_converter_inference": convert_sample_to_shot_bAbi,
"file_data":"data/dialog-bAbI-tasks/bAbI-dial-6-","with_knowledge":None,
"shots":{1024:[0,1,2],2048:[0,1,2]},"shot_separator":"\n\n",
"meta_type":"alldynamic","gen_len":50,"max_number_turns":3},
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_checkpoint", default="gpt2",type=str,required=True)
parser.add_argument("--dataset", default="flowMWOZ",type=str,required=True)
parser.add_argument("--filedata", default="data/flowMWOZ/test_dynamic.json",type=str,required=True)
parser.add_argument("--gpu", type=int, default=-1)
parser.add_argument("--beam", type=int, default=1)
parser.add_argument("--sample_times", type=int, default=3)
parser.add_argument("--do_sample", action='store_true', help="sample n times and rescore based on ppl")
parser.add_argument("--multigpu", action='store_true', help="run on multiple gpus")
parser.add_argument("--verbose", action='store_true', help="run on multiple gpus")
args = parser.parse_args()
if args.gpu >=0:
device = f'cuda:{args.gpu}'
else:
device = "cpu"
beam = args.beam
model_checkpoint = args.model_checkpoint
model, tokenizer, max_seq = load_model(args,model_checkpoint,device)
list_of_dataset = args.dataset.split(",")
d = list_of_dataset[0]
print(f"EVALUATING DATASET {d} on {model_checkpoint} with beam size {beam}")
name_experiment = args.filedata.split("test_dynamic_")[-1].replace(".json","")
print(name_experiment)
first_time = True
for shots in mapper[d]["shots"][max_seq]:
if shots == 0 and not first_time: continue
first_time = False
print(f"RUNNING {shots}")
if checker_file(f"{d}_{shots}_{model_checkpoint}_{name_experiment}.json") or args.verbose:
generation_out = generate_response_dynamic(model, tokenizer, shot_converter=mapper[d]["shot_converter"],
file_to_eval=args.filedata, prefix=shots,
device=device, max_number_turns=mapper[d]["max_number_turns"],
level=None,
meta_type=mapper[d]["meta_type"], gen_len=mapper[d]["gen_len"],
beam=beam, max_seq=max_seq, eos_token_id=198,
do_sample=args.do_sample, multigpu=args.multigpu,verbose=args.verbose)
res_score = score(files_test=args.filedata,files_to_score=generation_out, meta_type=d)
print(res_score)
ppl_score = evalute_ppl(model, tokenizer, shot_converter=mapper[d]["shot_converter"],
file_to_eval=args.filedata,
prefix=shots, device=device, max_number_turns=mapper[d]["max_number_turns"],
level=None, max_seq=max_seq,
meta_type=mapper[d]["meta_type"], verbose=args.verbose)
res_score["ppl"] = ppl_score
print(res_score)
save_file(f"{d}_{shots}_{model_checkpoint}_{name_experiment}.json", {"score":res_score,"generation":generation_out})
| []
| []
| [
"TOKENIZERS_PARALLELISM"
]
| [] | ["TOKENIZERS_PARALLELISM"] | python | 1 | 0 | |
integration/client/client_windows_test.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/osversion"
_ "github.com/Microsoft/hcsshim/test/functional/manifest" // For rsrc_amd64.syso
)
const (
defaultAddress = `\\.\pipe\containerd-containerd-test`
)
var (
defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test")
testImage string
testMultiLayeredImage = "ghcr.io/containerd/volume-copy-up:2.1"
shortCommand = withTrue()
longCommand = withProcessArgs("ping", "-t", "localhost")
)
func init() {
b := osversion.Build()
switch b {
case osversion.RS1:
testImage = "mcr.microsoft.com/windows/nanoserver:sac2016"
case osversion.RS3:
testImage = "mcr.microsoft.com/windows/nanoserver:1709"
case osversion.RS4:
testImage = "mcr.microsoft.com/windows/nanoserver:1803"
case osversion.RS5:
testImage = "mcr.microsoft.com/windows/nanoserver:1809"
case osversion.V19H1:
testImage = "mcr.microsoft.com/windows/nanoserver:1903"
case osversion.V19H2:
testImage = "mcr.microsoft.com/windows/nanoserver:1909"
case osversion.V20H1:
testImage = "mcr.microsoft.com/windows/nanoserver:2004"
case osversion.V20H2:
testImage = "mcr.microsoft.com/windows/nanoserver:20H2"
default:
fmt.Println("No test image defined for Windows build version:", b)
panic("No windows test image found for this Windows build")
}
fmt.Println("Windows test image:", testImage, ", Windows build version:", b)
}
| [
"\"programfiles\"",
"\"programfiles\""
]
| []
| [
"programfiles"
]
| [] | ["programfiles"] | go | 1 | 0 | |
netmiko/utilities.py | """Miscellaneous utility functions."""
from typing import (
Any,
AnyStr,
TypeVar,
Callable,
cast,
Optional,
Union,
List,
Dict,
Tuple,
)
from typing import TYPE_CHECKING
from glob import glob
import sys
import io
import os
from pathlib import Path
import functools
from datetime import datetime
import importlib.resources as pkg_resources
from textfsm import clitable
from textfsm.clitable import CliTableError
from netmiko import log
# For decorators
F = TypeVar("F", bound=Callable[..., Any])
if TYPE_CHECKING:
from netmiko.base_connection import BaseConnection
from os import PathLike
try:
from ttp import ttp
TTP_INSTALLED = True
except ImportError:
TTP_INSTALLED = False
try:
from genie.conf.base import Device
from genie.libs.parser.utils import get_parser
from pyats.datastructures import AttrDict
GENIE_INSTALLED = True
except ImportError:
GENIE_INSTALLED = False
try:
import serial.tools.list_ports
PYSERIAL_INSTALLED = True
except ImportError:
PYSERIAL_INSTALLED = False
# Dictionary mapping 'show run' for vendors with different command
SHOW_RUN_MAPPER = {
"brocade_fos": "configShow",
"juniper": "show configuration",
"juniper_junos": "show configuration",
"extreme": "show configuration",
"extreme_ers": "show running-config",
"extreme_exos": "show configuration",
"extreme_netiron": "show running-config",
"extreme_nos": "show running-config",
"extreme_slx": "show running-config",
"extreme_vdx": "show running-config",
"extreme_vsp": "show running-config",
"extreme_wing": "show running-config",
"ericsson_ipos": "show configuration",
"hp_comware": "display current-configuration",
"huawei": "display current-configuration",
"fortinet": "show full-configuration",
"checkpoint": "show configuration",
"cisco_wlc": "show run-config",
"enterasys": "show running-config",
"dell_force10": "show running-config",
"avaya_vsp": "show running-config",
"avaya_ers": "show running-config",
"brocade_vdx": "show running-config",
"brocade_nos": "show running-config",
"brocade_fastiron": "show running-config",
"brocade_netiron": "show running-config",
"alcatel_aos": "show configuration snapshot",
"cros_mtbr": "show running-config",
}
# Expand SHOW_RUN_MAPPER to include '_ssh' key
new_dict = {}
for k, v in SHOW_RUN_MAPPER.items():
new_key = k + "_ssh"
new_dict[k] = v
new_dict[new_key] = v
SHOW_RUN_MAPPER = new_dict
# Default location of netmiko temp directory for netmiko tools
NETMIKO_BASE_DIR = "~/.netmiko"
def load_yaml_file(yaml_file: Union[str, bytes, "PathLike[Any]"]) -> Any:
"""Read YAML file."""
try:
import yaml
except ImportError:
sys.exit("Unable to import yaml module.")
try:
with io.open(yaml_file, "rt", encoding="utf-8") as fname:
return yaml.safe_load(fname)
except IOError:
sys.exit("Unable to open YAML file")
def load_devices(file_name: Union[str, bytes, "PathLike[Any]", None] = None) -> Any:
"""Find and load .netmiko.yml file."""
yaml_devices_file = find_cfg_file(file_name)
return load_yaml_file(yaml_devices_file)
def find_cfg_file(
file_name: Union[str, bytes, "PathLike[Any]", None] = None
) -> Union[str, bytes, "PathLike[Any]"]:
"""
Search for netmiko_tools inventory file in the following order:
NETMIKO_TOOLS_CFG environment variable
Current directory
Home directory
Look for file named: .netmiko.yml or netmiko.yml
Also allow NETMIKO_TOOLS_CFG to point directly at a file
"""
if file_name and os.path.isfile(file_name):
return file_name
optional_path = os.environ.get("NETMIKO_TOOLS_CFG", "")
if os.path.isfile(optional_path):
return optional_path
search_paths = [optional_path, ".", os.path.expanduser("~")]
# Filter optional_path if null
search_paths = [path for path in search_paths if path]
for path in search_paths:
files = glob(f"{path}/.netmiko.yml") + glob(f"{path}/netmiko.yml")
if files:
return files[0]
raise IOError(
".netmiko.yml file not found in NETMIKO_TOOLS environment variable directory,"
" current directory, or home directory."
)
def display_inventory(my_devices: Dict[str, Union[List[str], Dict[str, Any]]]) -> None:
"""Print out inventory devices and groups."""
inventory_groups = ["all"]
inventory_devices = []
for k, v in my_devices.items():
if isinstance(v, list):
inventory_groups.append(k)
elif isinstance(v, dict):
inventory_devices.append((k, v["device_type"]))
inventory_groups.sort()
inventory_devices.sort(key=lambda x: x[0])
print("\nDevices:")
print("-" * 40)
for a_device, device_type in inventory_devices:
device_type = f" ({device_type})"
print(f"{a_device:<25}{device_type:>15}")
print("\n\nGroups:")
print("-" * 40)
for a_group in inventory_groups:
print(a_group)
print()
def obtain_all_devices(
my_devices: Dict[str, Union[List[str], Dict[str, Any]]]
) -> Dict[str, Dict[str, Any]]:
"""Dynamically create 'all' group."""
new_devices = {}
for device_name, device_or_group in my_devices.items():
# Skip any groups
if not isinstance(device_or_group, list):
new_devices[device_name] = device_or_group
return new_devices
def obtain_netmiko_filename(device_name: str) -> str:
"""Create file name based on device_name."""
_, netmiko_full_dir = find_netmiko_dir()
return f"{netmiko_full_dir}/{device_name}.txt"
def write_tmp_file(device_name: str, output: str) -> str:
file_name = obtain_netmiko_filename(device_name)
with open(file_name, "w") as f:
f.write(output)
return file_name
def ensure_dir_exists(verify_dir: str) -> None:
"""Ensure directory exists. Create if necessary."""
if not os.path.exists(verify_dir):
# Doesn't exist create dir
os.makedirs(verify_dir)
else:
# Exists
if not os.path.isdir(verify_dir):
# Not a dir, raise an exception
raise ValueError(f"{verify_dir} is not a directory")
def find_netmiko_dir() -> Tuple[str, str]:
"""Check environment first, then default dir"""
try:
netmiko_base_dir = os.environ["NETMIKO_DIR"]
except KeyError:
netmiko_base_dir = NETMIKO_BASE_DIR
netmiko_base_dir = os.path.expanduser(netmiko_base_dir)
if netmiko_base_dir == "/":
raise ValueError("/ cannot be netmiko_base_dir")
netmiko_full_dir = f"{netmiko_base_dir}/tmp"
return (netmiko_base_dir, netmiko_full_dir)
def write_bytes(out_data: AnyStr, encoding: str = "ascii") -> bytes:
"""Legacy for Python2 and Python3 compatible byte stream."""
if sys.version_info[0] >= 3:
if isinstance(out_data, str):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, bytes):
return out_data
msg = f"Invalid value for out_data neither unicode nor byte string: {str(out_data)}"
raise ValueError(msg)
def check_serial_port(name: str) -> str:
"""returns valid COM Port."""
if not PYSERIAL_INSTALLED:
msg = (
"\npyserial is not installed. Please PIP install pyserial:\n\n"
"pip install pyserial\n\n"
)
raise ValueError(msg)
try:
cdc = next(serial.tools.list_ports.grep(name))
serial_port = cdc[0]
assert isinstance(serial_port, str)
return serial_port
except StopIteration:
msg = f"device {name} not found. "
msg += "available devices are: "
ports = list(serial.tools.list_ports.comports())
for p in ports:
msg += f"{str(p)},"
raise ValueError(msg)
def get_template_dir(_skip_ntc_package: bool = False) -> str:
"""
Find and return the directory containing the TextFSM index file.
Order of preference is:
1) Find directory in `NET_TEXTFSM` Environment Variable.
2) Check for pip installed `ntc-templates` location in this environment.
3) ~/ntc-templates/ntc_templates/templates.
If `index` file is not found in any of these locations, raise ValueError
:return: directory containing the TextFSM index file
"""
msg = """
Directory containing TextFSM index file not found.
Please set the NET_TEXTFSM environment variable to point at the directory containing your TextFSM
index file.
Alternatively, `pip install ntc-templates` (if using ntc-templates).
"""
# Try NET_TEXTFSM environment variable
template_dir = os.environ.get("NET_TEXTFSM")
if template_dir is not None:
template_dir = os.path.expanduser(template_dir)
index = os.path.join(template_dir, "index")
if not os.path.isfile(index):
# Assume only base ./ntc-templates specified
template_dir = os.path.join(template_dir, "templates")
else:
# Try 'pip installed' ntc-templates
try:
with pkg_resources.path( # type: ignore
package="ntc_templates", resource="parse.py"
) as posix_path:
# Example: /opt/venv/netmiko/lib/python3.8/site-packages/ntc_templates/templates
template_dir = str(posix_path.parent.joinpath("templates"))
# This is for Netmiko automated testing
if _skip_ntc_package:
raise ModuleNotFoundError()
except ModuleNotFoundError:
# Finally check in ~/ntc-templates/ntc_templates/templates
home_dir = os.path.expanduser("~")
template_dir = os.path.join(
home_dir, "ntc-templates", "ntc_templates", "templates"
)
index = os.path.join(template_dir, "index")
if not os.path.isdir(template_dir) or not os.path.isfile(index):
raise ValueError(msg)
return os.path.abspath(template_dir)
def clitable_to_dict(cli_table: clitable.CliTable) -> List[Dict[str, str]]:
"""Converts TextFSM cli_table object to list of dictionaries."""
return_list = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
return_list.append(temp_dict)
return return_list
def _textfsm_parse(
textfsm_obj: clitable.CliTable,
raw_output: str,
attrs: Dict[str, str],
template_file: Optional[str] = None,
) -> Union[str, List[Dict[str, str]]]:
"""Perform the actual TextFSM parsing using the CliTable object."""
tfsm_parse: Callable[..., Any] = textfsm_obj.ParseCmd
try:
# Parse output through template
if template_file is not None:
tfsm_parse(raw_output, templates=template_file)
else:
tfsm_parse(raw_output, attrs)
structured_data = clitable_to_dict(textfsm_obj)
if structured_data == []:
return raw_output
else:
return structured_data
except (FileNotFoundError, CliTableError):
return raw_output
def get_structured_data_textfsm(
raw_output: str,
platform: Optional[str] = None,
command: Optional[str] = None,
template: Optional[str] = None,
) -> Union[str, List[Dict[str, str]]]:
"""
Convert raw CLI output to structured data using TextFSM template.
    You can use a straight TextFSM file, i.e. specify "template". If no template is specified,
    then you must use a CliTable index file.
"""
if platform is None or command is None:
attrs = {}
else:
attrs = {"Command": command, "Platform": platform}
if template is None:
if attrs == {}:
raise ValueError(
"Either 'platform/command' or 'template' must be specified."
)
template_dir = get_template_dir()
index_file = os.path.join(template_dir, "index")
textfsm_obj = clitable.CliTable(index_file, template_dir)
output = _textfsm_parse(textfsm_obj, raw_output, attrs)
# Retry the output if "cisco_xe" and not structured data
if platform and "cisco_xe" in platform:
if not isinstance(output, list):
attrs["Platform"] = "cisco_ios"
output = _textfsm_parse(textfsm_obj, raw_output, attrs)
return output
else:
template_path = Path(os.path.expanduser(template))
template_file = template_path.name
template_dir_alt = template_path.parents[0]
# CliTable with no index will fall-back to a TextFSM parsing behavior
textfsm_obj = clitable.CliTable(template_dir=template_dir_alt)
return _textfsm_parse(
textfsm_obj, raw_output, attrs, template_file=template_file
)
# For compatibility
get_structured_data = get_structured_data_textfsm
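# Example usage (illustrative; the platform/command values are hypothetical and need a
# matching ntc-templates entry for structured output, otherwise the raw string is
# returned unchanged):
#   parsed = get_structured_data_textfsm(cli_output, platform="cisco_ios",
#                                        command="show ip interface brief")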
def get_structured_data_ttp(raw_output: str, template: str) -> Union[str, List[Any]]:
"""
Convert raw CLI output to structured data using TTP template.
    You can use a straight TTP template file, i.e. specify "template".
"""
if not TTP_INSTALLED:
msg = "\nTTP is not installed. Please PIP install ttp:\n\npip install ttp\n"
raise ValueError(msg)
try:
ttp_parser = ttp(data=raw_output, template=template)
ttp_parser.parse(one=True)
result: List[Any] = ttp_parser.result(format="raw")
return result
except Exception:
return raw_output
def run_ttp_template(
connection: "BaseConnection",
template: Union[str, bytes, "PathLike[Any]"],
res_kwargs: Dict[str, Any],
**kwargs: Any,
) -> Any:
"""
Helper function to run TTP template parsing.
:param connection: Netmiko connection object
:param template: TTP template
:param res_kwargs: ``**res_kwargs`` arguments for TTP result method
:param kwargs: ``**kwargs`` for TTP object instantiation
"""
if not TTP_INSTALLED:
msg = "\nTTP is not installed. Please PIP install ttp:\n" "pip install ttp\n"
raise ValueError(msg)
parser = ttp(template=template, **kwargs)
# get inputs load for TTP template
ttp_inputs_load = parser.get_input_load()
log.debug("run_ttp_template: inputs load - {}".format(ttp_inputs_load))
# go over template's inputs and collect output from devices
for template_name, inputs in ttp_inputs_load.items():
for input_name, input_params in inputs.items():
method = input_params.get("method", "send_command")
method_kwargs = input_params.get("kwargs", {})
commands = input_params.get("commands", None)
# run sanity checks
if method not in dir(connection):
log.warning(
"run_ttp_template: '{}' input, unsupported method '{}', skipping".format(
input_name, method
)
)
continue
elif not commands:
log.warning(
"run_ttp_template: '{}' input no commands to collect, skipping".format(
input_name
)
)
continue
# collect commands output from device
out_list = [
getattr(connection, method)(command_string=command, **method_kwargs)
for command in commands
]
output = "\n".join(out_list)
# add collected output to TTP parser object
parser.add_input(
data=output, input_name=input_name, template_name=template_name
)
# run parsing in single process
parser.parse(one=True)
return parser.result(**res_kwargs)
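# Example usage (illustrative; the template path is hypothetical):
#   result = run_ttp_template(connection, "templates/show_version.ttp",
#                             res_kwargs={"format": "raw"})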
def get_structured_data_genie(
raw_output: str, platform: str, command: str
) -> Union[str, Dict[str, Any]]:
if not sys.version_info >= (3, 4):
raise ValueError("Genie requires Python >= 3.4")
if not GENIE_INSTALLED:
msg = (
"\nGenie and PyATS are not installed. Please PIP install both Genie and PyATS:\n"
"pip install genie\npip install pyats\n"
)
raise ValueError(msg)
if "cisco" not in platform:
return raw_output
genie_device_mapper = {
"cisco_ios": "ios",
"cisco_xe": "iosxe",
"cisco_xr": "iosxr",
"cisco_nxos": "nxos",
"cisco_asa": "asa",
}
os = None
    # platform might be _ssh, _telnet, _serial; strip that off
if platform.count("_") > 1:
base_list = platform.split("_")[:-1]
base_platform = "_".join(base_list)
else:
base_platform = platform
os = genie_device_mapper.get(base_platform)
if os is None:
return raw_output
# Genie specific construct for doing parsing (based on Genie in Ansible)
device = Device("new_device", os=os)
device.custom.setdefault("abstraction", {})
device.custom["abstraction"]["order"] = ["os"]
device.cli = AttrDict({"execute": None})
try:
# Test whether there is a parser for given command (return Exception if fails)
get_parser(command, device)
parsed_output: Dict[str, Any] = device.parse(command, output=raw_output)
return parsed_output
except Exception:
return raw_output
def structured_data_converter(
raw_data: str,
command: str,
platform: str,
use_textfsm: bool = False,
use_ttp: bool = False,
use_genie: bool = False,
textfsm_template: Optional[str] = None,
ttp_template: Optional[str] = None,
) -> Union[str, List[Any], Dict[str, Any]]:
"""
Try structured data converters in the following order: TextFSM, TTP, Genie.
Return the first structured data found, else return the raw_data as-is.
"""
command = command.strip()
if use_textfsm:
structured_output_tfsm = get_structured_data_textfsm(
raw_data, platform=platform, command=command, template=textfsm_template
)
if not isinstance(structured_output_tfsm, str):
return structured_output_tfsm
if use_ttp:
if ttp_template is None:
msg = """
The argument 'ttp_template=/path/to/template.ttp' must be set when use_ttp=True
"""
raise ValueError(msg)
else:
structured_output_ttp = get_structured_data_ttp(
raw_data, template=ttp_template
)
if not isinstance(structured_output_ttp, str):
return structured_output_ttp
if use_genie:
structured_output_genie = get_structured_data_genie(
raw_data, platform=platform, command=command
)
if not isinstance(structured_output_genie, str):
return structured_output_genie
return raw_data
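# Minimal usage sketch (hypothetical values): converters are tried in the order
# TextFSM -> TTP -> Genie, and the raw string comes back if none of them produce
# structured data:
#   data = structured_data_converter(raw_data=output, command="show version",
#                                    platform="cisco_ios", use_textfsm=True)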
def select_cmd_verify(func: F) -> F:
"""Override function cmd_verify argument with global setting."""
@functools.wraps(func)
def wrapper_decorator(self: "BaseConnection", *args: Any, **kwargs: Any) -> Any:
if self.global_cmd_verify is not None:
kwargs["cmd_verify"] = self.global_cmd_verify
return func(self, *args, **kwargs)
return cast(F, wrapper_decorator)
def m_exec_time(func: F) -> F:
@functools.wraps(func)
def wrapper_decorator(self: Any, *args: Any, **kwargs: Any) -> Any:
start_time = datetime.now()
result = func(self, *args, **kwargs)
end_time = datetime.now()
method_name = str(func)
print(f"{method_name}: Elapsed time: {end_time - start_time}")
return result
return cast(F, wrapper_decorator)
def f_exec_time(func: F) -> F:
@functools.wraps(func)
def wrapper_decorator(*args: Any, **kwargs: Any) -> Any:
start_time = datetime.now()
result = func(*args, **kwargs)
end_time = datetime.now()
print(f"Elapsed time: {end_time - start_time}")
return result
return cast(F, wrapper_decorator)
def calc_old_timeout(
max_loops: Optional[int] = None,
delay_factor: Optional[float] = None,
loop_delay: float = 0.2,
old_timeout: int = 100,
) -> float:
"""
loop_delay is .2 in Netmiko 3.x
    delay_factor would multiply the loop delay
Number of loops was typically 500
Thus each loop would sleep (loop_delay * delay_factor) seconds
    That sleep would happen max_loops times
Formula is (loop_delay * delay_factor) * max_loops
There was a way Netmiko's self.timeout could override the default settings and essentially be
used to adjust max_loops (this was probably rarely used).
"""
if max_loops is None:
max_loops = 500
if delay_factor is None:
delay_factor = 1.0
# This is the logic for having self.timeout override max_loops
if delay_factor == 1 and max_loops == 500:
max_loops = int(old_timeout / loop_delay)
return max_loops * loop_delay * delay_factor
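# Worked example (using the defaults above): delay_factor=1.0 and max_loops=500 trigger
# the override branch, so max_loops = int(100 / 0.2) = 500 and the function returns
# 500 * 0.2 * 1.0 = 100.0 seconds, i.e. the legacy loop-based wait maps to a
# 100 second timeout.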
| []
| []
| [
"NETMIKO_TOOLS_CFG",
"NETMIKO_DIR",
"NET_TEXTFSM"
]
| [] | ["NETMIKO_TOOLS_CFG", "NETMIKO_DIR", "NET_TEXTFSM"] | python | 3 | 0 | |
AnimeView/wsgi.py | """
WSGI config for AnimeView project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AnimeView.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
integration/integration_test.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import binascii
import glob
import itertools
import json
import os
import random
import six
import string
import subprocess
import tempfile
import uuid
import errno
import numpy as np
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join(ARROW_HOME, 'java', 'pom.xml'))
tag_pattern = '{http://maven.apache.org/POM/4.0.0}version'
version_tag = list(tree.getroot().findall(tag_pattern))[0]
return version_tag.text
def guid():
return uuid.uuid4().hex
# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
def rands(nchars):
"""
    Generate one random string of `nchars` characters.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def tobytes(o):
if isinstance(o, six.text_type):
return o.encode('utf8')
return o
def frombytes(o):
if isinstance(o, six.binary_type):
return o.decode('utf8')
return o
# from the merge_arrow_pr.py script
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % ' '.join(cmd))
print('With output:')
print('--------------')
print(frombytes(e.output))
print('--------------')
raise e
return frombytes(output)
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
def __init__(self, name, nullable=True):
self.name = name
self.nullable = nullable
def get_json(self):
return OrderedDict([
('name', self.name),
('type', self._get_type()),
('nullable', self.nullable),
('children', self._get_children())
])
def _make_is_valid(self, size):
if self.nullable:
return np.random.randint(0, 2, size=size)
else:
return np.ones(size)
class Column(object):
def __init__(self, name, count):
self.name = name
self.count = count
def __len__(self):
return self.count
def _get_children(self):
return []
def _get_buffers(self):
return []
def get_json(self):
entries = [
('name', self.name),
('count', self.count)
]
buffers = self._get_buffers()
entries.extend(buffers)
children = self._get_children()
if len(children) > 0:
entries.append(('children', children))
return OrderedDict(entries)
class PrimitiveType(DataType):
def _get_children(self):
return []
class PrimitiveColumn(Column):
def __init__(self, name, count, is_valid, values):
super(PrimitiveColumn, self).__init__(name, count)
self.is_valid = is_valid
self.values = values
def _encode_value(self, x):
return x
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('DATA', list([self._encode_value(x) for x in self.values]))
]
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX
class IntegerType(PrimitiveType):
def __init__(self, name, is_signed, bit_width, nullable=True,
min_value=TEST_INT_MIN,
max_value=TEST_INT_MAX):
super(IntegerType, self).__init__(name, nullable=nullable)
self.is_signed = is_signed
self.bit_width = bit_width
self.min_value = min_value
self.max_value = max_value
def _get_generated_data_bounds(self):
signed_iinfo = np.iinfo('int' + str(self.bit_width))
if self.is_signed:
min_value, max_value = signed_iinfo.min, signed_iinfo.max
else:
# ARROW-1837 Remove this hack and restore full unsigned integer
# range
min_value, max_value = 0, signed_iinfo.max
lower_bound = max(min_value, self.min_value)
upper_bound = min(max_value, self.max_value)
return lower_bound, upper_bound
def _get_type(self):
return OrderedDict([
('name', 'int'),
('isSigned', self.is_signed),
('bitWidth', self.bit_width)
])
def generate_column(self, size, name=None):
lower_bound, upper_bound = self._get_generated_data_bounds()
return self.generate_range(size, lower_bound, upper_bound, name=name)
def generate_range(self, size, lower, upper, name=None):
values = [int(x) for x in
np.random.randint(lower, upper, size=size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class DateType(IntegerType):
DAY = 0
MILLISECOND = 1
# 1/1/1 to 12/31/9999
_ranges = {
DAY: [-719162, 2932896],
MILLISECOND: [-62135596800000, 253402214400000]
}
def __init__(self, name, unit, nullable=True):
bit_width = 32 if unit == self.DAY else 64
min_value, max_value = self._ranges[unit]
super(DateType, self).__init__(
name, True, bit_width, nullable=nullable,
min_value=min_value, max_value=max_value
)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'date'),
('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
])
TIMEUNIT_NAMES = {
's': 'SECOND',
'ms': 'MILLISECOND',
'us': 'MICROSECOND',
'ns': 'NANOSECOND'
}
class TimeType(IntegerType):
BIT_WIDTHS = {
's': 32,
'ms': 32,
'us': 64,
'ns': 64
}
_ranges = {
's': [0, 86400],
'ms': [0, 86400000],
'us': [0, 86400000000],
'ns': [0, 86400000000000]
}
def __init__(self, name, unit='s', nullable=True):
min_val, max_val = self._ranges[unit]
super(TimeType, self).__init__(name, True, self.BIT_WIDTHS[unit],
nullable=nullable,
min_value=min_val,
max_value=max_val)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'time'),
('unit', TIMEUNIT_NAMES[self.unit]),
('bitWidth', self.bit_width)
])
class TimestampType(IntegerType):
# 1/1/1 to 12/31/9999
_ranges = {
's': [-62135596800, 253402214400],
'ms': [-62135596800000, 253402214400000],
'us': [-62135596800000000, 253402214400000000],
# Physical range for int64, ~584 years and change
'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
}
def __init__(self, name, unit='s', tz=None, nullable=True):
min_val, max_val = self._ranges[unit]
super(TimestampType, self).__init__(name, True, 64, nullable=nullable,
min_value=min_val,
max_value=max_val)
self.unit = unit
self.tz = tz
def _get_type(self):
fields = [
('name', 'timestamp'),
('unit', TIMEUNIT_NAMES[self.unit])
]
if self.tz is not None:
fields.append(('timezone', self.tz))
return OrderedDict(fields)
class FloatingPointType(PrimitiveType):
def __init__(self, name, bit_width, nullable=True):
super(FloatingPointType, self).__init__(name, nullable=nullable)
self.bit_width = bit_width
self.precision = {
16: 'HALF',
32: 'SINGLE',
64: 'DOUBLE'
}[self.bit_width]
@property
def numpy_type(self):
return 'float' + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'floatingpoint'),
('precision', self.precision)
])
def generate_column(self, size, name=None):
values = np.random.randn(size) * 1000
values = np.round(values, 3)
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
DECIMAL_PRECISION_TO_VALUE = {
key: (1 << (8 * i - 1)) - 1 for i, key in enumerate(
[1, 3, 5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 32, 34, 36],
start=1,
)
}
def decimal_range_from_precision(precision):
assert 1 <= precision <= 38
try:
max_value = DECIMAL_PRECISION_TO_VALUE[precision]
except KeyError:
return decimal_range_from_precision(precision - 1)
else:
return ~max_value, max_value
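# Worked example (values follow from the table above): decimal_range_from_precision(3)
# looks up key 3 -> (1 << 15) - 1 == 32767 and returns (~32767, 32767) == (-32768, 32767).
# A precision with no exact entry (e.g. 4) recurses down to the next lower key (3).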
class DecimalType(PrimitiveType):
def __init__(self, name, precision, scale, bit_width=128, nullable=True):
super(DecimalType, self).__init__(name, nullable=True)
self.precision = precision
self.scale = scale
self.bit_width = bit_width
@property
def numpy_type(self):
return object
def _get_type(self):
return OrderedDict([
('name', 'decimal'),
('precision', self.precision),
('scale', self.scale),
])
def generate_column(self, size, name=None):
min_value, max_value = decimal_range_from_precision(self.precision)
values = [random.randint(min_value, max_value) for _ in range(size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return DecimalColumn(name, size, is_valid, values, self.bit_width)
class DecimalColumn(PrimitiveColumn):
def __init__(self, name, count, is_valid, values, bit_width=128):
super(DecimalColumn, self).__init__(name, count, is_valid, values)
self.bit_width = bit_width
def _encode_value(self, x):
return str(x)
class BooleanType(PrimitiveType):
bit_width = 1
def _get_type(self):
return OrderedDict([('name', 'bool')])
@property
def numpy_type(self):
return 'bool'
def generate_column(self, size, name=None):
values = list(map(bool, np.random.randint(0, 2, size=size)))
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class BinaryType(PrimitiveType):
@property
def numpy_type(self):
return object
@property
def column_class(self):
return BinaryColumn
def _get_type(self):
return OrderedDict([('name', 'binary')])
def generate_column(self, size, name=None):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
draw = (np.random.randint(0, 255, size=K)
.astype(np.uint8)
.tostring())
values.append(draw)
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class StringType(BinaryType):
@property
def column_class(self):
return StringColumn
def _get_type(self):
return OrderedDict([('name', 'utf8')])
def generate_column(self, size, name=None):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
values.append(tobytes(rands(K)))
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class JsonSchema(object):
def __init__(self, fields):
self.fields = fields
def get_json(self):
return OrderedDict([
('fields', [field.get_json() for field in self.fields])
])
class BinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return frombytes(binascii.hexlify(x).upper())
def _get_buffers(self):
offset = 0
offsets = [0]
data = []
for i, v in enumerate(self.values):
if self.is_valid[i]:
offset += len(v)
else:
v = b""
offsets.append(offset)
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('OFFSET', offsets),
('DATA', data)
]
class StringColumn(BinaryColumn):
def _encode_value(self, x):
return frombytes(x)
class ListType(DataType):
def __init__(self, name, value_type, nullable=True):
super(ListType, self).__init__(name, nullable=nullable)
self.value_type = value_type
def _get_type(self):
return OrderedDict([
('name', 'list')
])
def _get_children(self):
return [self.value_type.get_json()]
def generate_column(self, size, name=None):
MAX_LIST_SIZE = 4
is_valid = self._make_is_valid(size)
list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(list_sizes[i])
offsets.append(offset)
# The offset now is the total number of elements in the child array
values = self.value_type.generate_column(offset)
if name is None:
name = self.name
return ListColumn(name, size, is_valid, offsets, values)
class ListColumn(Column):
def __init__(self, name, count, is_valid, offsets, values):
super(ListColumn, self).__init__(name, count)
self.is_valid = is_valid
self.offsets = offsets
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', list(self.offsets))
]
def _get_children(self):
return [self.values.get_json()]
class StructType(DataType):
def __init__(self, name, field_types, nullable=True):
super(StructType, self).__init__(name, nullable=nullable)
self.field_types = field_types
def _get_type(self):
return OrderedDict([
('name', 'struct')
])
def _get_children(self):
return [type_.get_json() for type_ in self.field_types]
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
field_values = [type_.generate_column(size)
for type_ in self.field_types]
if name is None:
name = self.name
return StructColumn(name, size, is_valid, field_values)
class Dictionary(object):
def __init__(self, id_, field, values, ordered=False):
self.id_ = id_
self.field = field
self.values = values
self.ordered = ordered
def __len__(self):
return len(self.values)
def get_json(self):
dummy_batch = JsonRecordBatch(len(self.values), [self.values])
return OrderedDict([
('id', self.id_),
('data', dummy_batch.get_json())
])
class DictionaryType(DataType):
def __init__(self, name, index_type, dictionary, nullable=True):
super(DictionaryType, self).__init__(name, nullable=nullable)
assert isinstance(index_type, IntegerType)
assert isinstance(dictionary, Dictionary)
self.index_type = index_type
self.dictionary = dictionary
def get_json(self):
dict_field = self.dictionary.field
return OrderedDict([
('name', self.name),
('type', dict_field._get_type()),
('nullable', self.nullable),
('children', dict_field._get_children()),
('dictionary', OrderedDict([
('id', self.dictionary.id_),
('indexType', self.index_type._get_type()),
('isOrdered', self.dictionary.ordered)
]))
])
def generate_column(self, size, name=None):
if name is None:
name = self.name
return self.index_type.generate_range(size, 0, len(self.dictionary),
name=name)
class StructColumn(Column):
def __init__(self, name, count, is_valid, field_values):
super(StructColumn, self).__init__(name, count)
self.is_valid = is_valid
self.field_values = field_values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class JsonRecordBatch(object):
def __init__(self, count, columns):
self.count = count
self.columns = columns
def get_json(self):
return OrderedDict([
('count', self.count),
('columns', [col.get_json() for col in self.columns])
])
class JsonFile(object):
def __init__(self, name, schema, batches, dictionaries=None):
self.name = name
self.schema = schema
self.dictionaries = dictionaries or []
self.batches = batches
def get_json(self):
entries = [
('schema', self.schema.get_json())
]
if len(self.dictionaries) > 0:
entries.append(('dictionaries',
[dictionary.get_json()
for dictionary in self.dictionaries]))
entries.append(('batches', [batch.get_json()
for batch in self.batches]))
return OrderedDict(entries)
def write(self, path):
with open(path, 'wb') as f:
f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
def get_field(name, type_, nullable=True):
if type_ == 'binary':
return BinaryType(name, nullable=nullable)
elif type_ == 'utf8':
return StringType(name, nullable=nullable)
dtype = np.dtype(type_)
if dtype.kind in ('i', 'u'):
return IntegerType(name, dtype.kind == 'i', dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'f':
return FloatingPointType(name, dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'b':
return BooleanType(name, nullable=nullable)
else:
raise TypeError(dtype)
def _generate_file(name, fields, batch_sizes, dictionaries=None):
schema = JsonSchema(fields)
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JsonRecordBatch(size, columns))
return JsonFile(name, schema, batches, dictionaries)
def generate_primitive_case(batch_sizes, name='primitive'):
types = ['bool', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'binary', 'utf8']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, True))
fields.append(get_field(type_ + "_nonnullable", type_, False))
return _generate_file(name, fields, batch_sizes)
def generate_decimal_case():
fields = [
DecimalType(name='f{}'.format(i), precision=precision, scale=2)
for i, precision in enumerate(range(3, 39))
]
possible_batch_sizes = 7, 10
batch_sizes = [possible_batch_sizes[i % 2] for i in range(len(fields))]
return _generate_file('decimal', fields, batch_sizes)
def generate_datetime_case():
fields = [
DateType('f0', DateType.DAY),
DateType('f1', DateType.MILLISECOND),
TimeType('f2', 's'),
TimeType('f3', 'ms'),
TimeType('f4', 'us'),
TimeType('f5', 'ns'),
TimestampType('f6', 's'),
TimestampType('f7', 'ms'),
TimestampType('f8', 'us'),
TimestampType('f9', 'ns'),
TimestampType('f10', 'ms', tz=None),
TimestampType('f11', 's', tz='UTC'),
TimestampType('f12', 'ms', tz='US/Eastern'),
TimestampType('f13', 'us', tz='Europe/Paris'),
TimestampType('f14', 'ns', tz='US/Pacific')
]
batch_sizes = [7, 10]
return _generate_file("datetime", fields, batch_sizes)
def generate_nested_case():
fields = [
ListType('list_nullable', get_field('item', 'int32')),
StructType('struct_nullable', [get_field('f1', 'int32'),
get_field('f2', 'utf8')]),
# TODO(wesm): this causes segfault
# ListType('list_nonnullable', get_field('item', 'int32'), False),
]
batch_sizes = [7, 10]
return _generate_file("nested", fields, batch_sizes)
def generate_dictionary_case():
dict_type1 = StringType('dictionary1')
dict_type2 = get_field('dictionary2', 'int64')
dict1 = Dictionary(0, dict_type1,
dict_type1.generate_column(10, name='DICT0'))
dict2 = Dictionary(1, dict_type2,
dict_type2.generate_column(50, name='DICT1'))
fields = [
DictionaryType('dict1_0', get_field('', 'int8'), dict1),
DictionaryType('dict1_1', get_field('', 'int32'), dict1),
DictionaryType('dict2_0', get_field('', 'int16'), dict2)
]
batch_sizes = [7, 10]
return _generate_file("dictionary", fields, batch_sizes,
dictionaries=[dict1, dict2])
def get_generated_json_files():
temp_dir = tempfile.mkdtemp()
def _temp_path():
return
file_objs = [
generate_primitive_case([17, 20], name='primitive'),
generate_primitive_case([0, 0, 0], name='primitive_zerolength'),
generate_decimal_case(),
generate_datetime_case(),
generate_nested_case(),
generate_dictionary_case()
]
generated_paths = []
for file_obj in file_objs:
out_path = os.path.join(temp_dir, 'generated_' +
file_obj.name + '.json')
file_obj.write(out_path)
generated_paths.append(out_path)
return generated_paths
# ----------------------------------------------------------------------
# Testing harness
class IntegrationRunner(object):
def __init__(self, json_files, testers, debug=False):
self.json_files = json_files
self.testers = testers
self.temp_dir = tempfile.mkdtemp()
self.debug = debug
def run(self):
for producer, consumer in itertools.product(filter(lambda t: t.PRODUCER, self.testers),
filter(lambda t: t.CONSUMER, self.testers)):
self._compare_implementations(producer, consumer)
def _compare_implementations(self, producer, consumer):
print('##########################################################')
print(
'{0} producing, {1} consuming'.format(producer.name, consumer.name)
)
print('##########################################################')
for json_path in self.json_files:
print('==========================================================')
print('Testing file {0}'.format(json_path))
print('==========================================================')
name = os.path.splitext(os.path.basename(json_path))[0]
# Make the random access file
print('-- Creating binary inputs')
producer_file_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.json_to_arrow')
producer.json_to_file(json_path, producer_file_path)
# Validate the file
print('-- Validating file')
consumer.validate(json_path, producer_file_path)
print('-- Validating stream')
producer_stream_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.arrow_to_stream')
consumer_file_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.stream_to_arrow')
producer.file_to_stream(producer_file_path,
producer_stream_path)
consumer.stream_to_file(producer_stream_path,
consumer_file_path)
consumer.validate(json_path, consumer_file_path)
class Tester(object):
PRODUCER = False
CONSUMER = False
def __init__(self, debug=False):
self.debug = debug
def json_to_file(self, json_path, arrow_path):
raise NotImplementedError
def stream_to_file(self, stream_path, file_path):
raise NotImplementedError
def file_to_stream(self, file_path, stream_path):
raise NotImplementedError
def validate(self, json_path, arrow_path):
raise NotImplementedError
class JavaTester(Tester):
PRODUCER = True
CONSUMER = True
_arrow_version = load_version_from_pom()
ARROW_TOOLS_JAR = os.environ.get(
'ARROW_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_HOME,
'java/tools/target/arrow-tools-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
name = 'Java'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.Integration']
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['-c', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.StreamToFile',
stream_path, file_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.FileToStream',
file_path, stream_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
class CPPTester(Tester):
PRODUCER = True
CONSUMER = True
EXE_PATH = os.environ.get(
'ARROW_CPP_EXE_PATH',
os.path.join(ARROW_HOME, 'cpp/build/debug'))
CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'json-integration-test')
STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file')
FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream')
name = 'C++'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.CPP_INTEGRATION_EXE, '--integration']
if arrow_path is not None:
cmd.append('--arrow=' + arrow_path)
if json_path is not None:
cmd.append('--json=' + json_path)
cmd.append('--mode=' + command)
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
class JSTester(Tester):
PRODUCER = False
CONSUMER = True
INTEGRATION_EXE = os.path.join(ARROW_HOME, 'js/bin/integration.js')
name = 'JS'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.INTEGRATION_EXE]
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['--mode', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def stream_to_file(self, stream_path, file_path):
# Just copy stream to file, we can read the stream directly
cmd = ['cp', stream_path, file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def get_static_json_files():
glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json')
return glob.glob(glob_pattern)
def run_all_tests(debug=False):
testers = [CPPTester(debug=debug), JavaTester(debug=debug), JSTester(debug=debug)]
static_json_files = get_static_json_files()
generated_json_files = get_generated_json_files()
json_files = static_json_files + generated_json_files
runner = IntegrationRunner(json_files, testers, debug=debug)
runner.run()
print('-- All tests passed!')
def write_js_test_json(directory):
generate_nested_case().write(os.path.join(directory, 'nested.json'))
generate_decimal_case().write(os.path.join(directory, 'decimal.json'))
generate_datetime_case().write(os.path.join(directory, 'datetime.json'))
(generate_dictionary_case()
.write(os.path.join(directory, 'dictionary.json')))
(generate_primitive_case([7, 10])
.write(os.path.join(directory, 'primitive.json')))
(generate_primitive_case([0, 0, 0])
.write(os.path.join(directory, 'primitive-empty.json')))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arrow integration test CLI')
parser.add_argument('--write_generated_json', dest='generated_json_path',
action='store', default=False,
help='Generate test JSON')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='Run executables in debug mode as relevant')
args = parser.parse_args()
if args.generated_json_path:
try:
os.makedirs(args.generated_json_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
write_js_test_json(args.generated_json_path)
else:
run_all_tests(debug=args.debug)
| []
| []
| [
"ARROW_JAVA_INTEGRATION_JAR",
"ARROW_CPP_EXE_PATH"
]
| [] | ["ARROW_JAVA_INTEGRATION_JAR", "ARROW_CPP_EXE_PATH"] | python | 2 | 0 | |
tests/h/conftest.py | # -*- coding: utf-8 -*-
# pylint: disable=no-self-use
"""
The `conftest` module is automatically loaded by py.test and serves as a place
to put fixture functions that are useful application-wide.
"""
import functools
import os
import deform
import mock
import pytest
import click.testing
import sqlalchemy
from pyramid import testing
from pyramid.request import apply_request_extensions
from sqlalchemy.orm import sessionmaker
from webob.multidict import MultiDict
from h import db
from h import form
from h.settings import database_url
from h._compat import text_type
TEST_AUTHORITY = u'example.com'
TEST_DATABASE_URL = database_url(os.environ.get('TEST_DATABASE_URL',
'postgresql://postgres@localhost/htest'))
Session = sessionmaker()
class DummyFeature(object):
"""
A dummy feature flag looker-upper.
Because we're probably testing all feature-flagged functionality, this
feature client defaults every flag to *True*, which is the exact opposite
of what happens outside of testing.
"""
def __init__(self):
self.flags = {}
self.loaded = False
def __call__(self, name, *args, **kwargs):
return self.flags.get(name, True)
def all(self):
return self.flags
def load(self):
self.loaded = True
def clear(self):
self.flags = {}
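# Illustrative sketch (not one of the existing fixtures/tests): overriding a single
# flag on DummyFeature while every other flag keeps its permissive default. The flag
# name below is hypothetical.
#
#     feature = DummyFeature()
#     feature.flags["new_homepage"] = False
#     assert feature("new_homepage") is False
#     assert feature("anything_else") is True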
class DummySession(object):
"""
A dummy database session.
"""
def __init__(self):
self.added = []
self.deleted = []
self.flushed = False
def add(self, obj):
self.added.append(obj)
def delete(self, obj):
self.deleted.append(obj)
def flush(self):
self.flushed = True
# A fake version of colander.Invalid
class FakeInvalid(object):
def __init__(self, errors):
self.errors = errors
def asdict(self):
return self.errors
def autopatcher(request, target, **kwargs):
"""Patch and cleanup automatically. Wraps :py:func:`mock.patch`."""
options = {'autospec': True}
options.update(kwargs)
patcher = mock.patch(target, **options)
obj = patcher.start()
request.addfinalizer(patcher.stop)
return obj
@pytest.yield_fixture
def cli():
runner = click.testing.CliRunner()
with runner.isolated_filesystem():
yield runner
@pytest.fixture(scope='session')
def db_engine():
"""Set up the database connection and create tables."""
engine = sqlalchemy.create_engine(TEST_DATABASE_URL)
db.init(engine, should_create=True, should_drop=True, authority=TEST_AUTHORITY)
return engine
@pytest.yield_fixture
def db_session(db_engine):
"""
Prepare the SQLAlchemy session object.
We enable fast repeatable database tests by setting up the database only
once per session (see :func:`db_engine`) and then wrapping each test
function in a transaction that is rolled back.
Additionally, we set a SAVEPOINT before entering the test, and if we
detect that the test has committed (i.e. released the savepoint) we
immediately open another. This has the effect of preventing test code from
committing the outer transaction.
"""
conn = db_engine.connect()
trans = conn.begin()
session = Session(bind=conn)
session.begin_nested()
@sqlalchemy.event.listens_for(session, "after_transaction_end")
def restart_savepoint(session, transaction):
if transaction.nested and not transaction._parent.nested:
session.begin_nested()
try:
yield session
finally:
session.close()
trans.rollback()
conn.close()
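# Illustrative sketch (an assumption, not an existing test): because each test runs
# inside a SAVEPOINT that is reopened whenever it is released, even an explicit
# commit never escapes the outer transaction that is rolled back above.
#
#     def test_commit_is_contained(db_session, factories):
#         user = factories.User()          # hypothetical factory
#         db_session.commit()              # releases the savepoint; a new one opens
#         assert db_session.query(type(user)).count() == 1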
@pytest.yield_fixture
def factories(db_session):
from ..common import factories
factories.set_session(db_session)
yield factories
factories.set_session(None)
@pytest.fixture
def fake_feature():
return DummyFeature()
@pytest.fixture
def fake_db_session():
return DummySession()
@pytest.fixture
def form_validating_to():
def form_validating_to(appstruct):
form = mock.MagicMock()
form.validate.return_value = appstruct
form.render.return_value = 'valid form'
return form
return form_validating_to
@pytest.fixture
def invalid_form():
def invalid_form(errors=None):
if errors is None:
errors = {}
invalid = FakeInvalid(errors)
form = mock.MagicMock()
form.validate.side_effect = deform.ValidationFailure(None, None, invalid)
form.render.return_value = 'invalid form'
return form
return invalid_form
@pytest.fixture
def mailer(pyramid_config):
from pyramid_mailer.interfaces import IMailer
from pyramid_mailer.testing import DummyMailer
mailer = DummyMailer()
pyramid_config.registry.registerUtility(mailer, IMailer)
return mailer
@pytest.fixture
def matchers():
from ..common import matchers
return matchers
@pytest.fixture
def notify(pyramid_config, request):
patcher = mock.patch.object(pyramid_config.registry, 'notify', autospec=True)
request.addfinalizer(patcher.stop)
return patcher.start()
@pytest.fixture
def patch(request):
return functools.partial(autopatcher, request)
@pytest.yield_fixture
def pyramid_config(pyramid_settings, pyramid_request):
"""Pyramid configurator object."""
with testing.testConfig(request=pyramid_request,
settings=pyramid_settings) as config:
# Include pyramid_services so it's easy to set up fake services in tests
config.include('pyramid_services')
apply_request_extensions(pyramid_request)
yield config
@pytest.fixture
def pyramid_request(db_session, fake_feature, pyramid_settings):
"""Dummy Pyramid request object."""
request = testing.DummyRequest(db=db_session, feature=fake_feature)
request.authority = text_type(TEST_AUTHORITY)
request.create_form = mock.Mock()
request.matched_route = mock.Mock()
request.registry.settings = pyramid_settings
request.is_xhr = False
request.params = MultiDict()
request.GET = request.params
request.POST = request.params
return request
@pytest.fixture
def pyramid_csrf_request(pyramid_request):
"""Dummy Pyramid request object with a valid CSRF token."""
pyramid_request.headers['X-CSRF-Token'] = pyramid_request.session.get_csrf_token()
return pyramid_request
@pytest.fixture
def pyramid_settings():
"""Default app settings."""
return {
'sqlalchemy.url': TEST_DATABASE_URL
}
| []
| []
| [
"TEST_DATABASE_URL"
]
| [] | ["TEST_DATABASE_URL"] | python | 1 | 0 | |
dev.go | package log
import (
"os"
"strings"
)
// IsDevelopment evaluates the following sources:
// - if any _INTELLIJ_* environment variable is defined, returns true
// - if XPC_SERVICE_NAME contains goland, returns true
// - if APP_ENV or NODE_ENV is set, returns false when the value is 'production', otherwise returns true
// - if any VSCODE_* environment variable is defined, returns true
// - otherwise returns false
func IsDevelopment() bool {
for _, kv := range os.Environ() {
if strings.HasPrefix(kv, "_INTELLIJ_") || strings.HasPrefix(kv, "VSCODE_") {
return true
}
}
if strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "goland") {
return true
}
nodeEnv := os.Getenv("APP_ENV")
if strings.TrimSpace(nodeEnv) != "" {
return nodeEnv != "production"
}
nodeEnv = os.Getenv("NODE_ENV")
if strings.TrimSpace(nodeEnv) != "" {
return nodeEnv != "production"
}
return false
}
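// Illustrative usage (a sketch, not part of this package): callers typically switch
// their log output on this check; the handlers named below are assumptions.
//
//	if log.IsDevelopment() {
//		// e.g. colored, human-readable console output
//	} else {
//		// e.g. structured JSON output for production collectors
//	}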
| [
"\"XPC_SERVICE_NAME\"",
"\"APP_ENV\"",
"\"NODE_ENV\""
]
| []
| [
"APP_ENV",
"NODE_ENV",
"XPC_SERVICE_NAME"
]
| [] | ["APP_ENV", "NODE_ENV", "XPC_SERVICE_NAME"] | go | 3 | 0 | |
src/main/java/swim/iot/util/EnvConfig.java | package swim.iot.util;
public class EnvConfig {
/**
* Configuration environment variables for Event Hub
*/
public static final String EVENT_HUB_CONNSTRING = envCorrection(System.getenv("EVENT_HUB_CONNSTRING"), "");
public static final String EVENT_HUB_NAME = envCorrection(System.getenv("EVENT_HUB_NAME"), "");
/**
* Configuration environment variable for device name
*/
public static final String EDGE_DEVICE_NAME = envCorrection(System.getenv("EDGE_DEVICE_NAME"), "localSimulator");
/**
* Configuration environment variables for ADLS Gen2
*/
public static final String ADLS_ACCOUNT_NAME = envCorrection(System.getenv("ADLS_ACCOUNT_NAME"), "");
public static final String ADLS_ACCOUNT_KEY = envCorrection(System.getenv("ADLS_ACCOUNT_KEY"), "");
public static final String FILE_SYSTEM = envCorrection(System.getenv("FILE_SYSTEM"), "");
/**
* Helper function that standardizes parsing of environment variables.
*
* @param env raw environment variable value, possibly null
* @param defaultValue value returned when env is null or empty
* @return defaultValue when env is null or empty, otherwise env.trim()
*/
private static String envCorrection(String env, String defaultValue) {
if (env == null || env.isEmpty()) {
return defaultValue;
} else return env.trim();
}
}
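// Illustrative usage (a sketch, not part of the original class): callers can branch on
// whether the optional settings were supplied, e.g.
//
//   if (!EnvConfig.EVENT_HUB_CONNSTRING.isEmpty() && !EnvConfig.EVENT_HUB_NAME.isEmpty()) {
//     // wire up the Event Hub publisher here (hypothetical call site)
//   }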
| [
"\"EVENT_HUB_CONNSTRING\"",
"\"EVENT_HUB_NAME\"",
"\"EDGE_DEVICE_NAME\"",
"\"ADLS_ACCOUNT_NAME\"",
"\"ADLS_ACCOUNT_KEY\"",
"\"FILE_SYSTEM\""
]
| []
| [
"FILE_SYSTEM",
"EVENT_HUB_CONNSTRING",
"EDGE_DEVICE_NAME",
"ADLS_ACCOUNT_KEY",
"ADLS_ACCOUNT_NAME",
"EVENT_HUB_NAME"
]
| [] | ["FILE_SYSTEM", "EVENT_HUB_CONNSTRING", "EDGE_DEVICE_NAME", "ADLS_ACCOUNT_KEY", "ADLS_ACCOUNT_NAME", "EVENT_HUB_NAME"] | java | 6 | 0 | |
cmd/tink-server/cmd/root.go | package cmd
import (
"context"
"crypto/tls"
"database/sql"
"fmt"
"io/ioutil"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/packethost/pkg/log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/tinkerbell/tink/client/listener"
"github.com/tinkerbell/tink/db"
rpcServer "github.com/tinkerbell/tink/grpc-server"
httpServer "github.com/tinkerbell/tink/http-server"
)
// NewRootCommand creates a new Tink Server Cobra root command.
func NewRootCommand(version string, logger log.Logger) *cobra.Command {
rootCmd := &cobra.Command{
Use: "tink-server",
Short: "Tinkerbell provisioning and workflow engine",
Long: "Tinkerbell provisioning and workflow engine",
Version: version,
PreRunE: func(cmd *cobra.Command, args []string) error {
viper, err := createViper(logger)
if err != nil {
return err
}
return applyViper(viper, cmd)
},
Run: func(cmd *cobra.Command, args []string) {
facility, _ := cmd.Flags().GetString("facility")
caCertFile, _ := cmd.Flags().GetString("ca-cert")
tlsCertFile, _ := cmd.Flags().GetString("tls-cert")
tlsKeyFile, _ := cmd.Flags().GetString("tls-key")
onlyMigration, _ := cmd.Flags().GetBool("only-migration")
logger = logger.With("facility", facility)
logger.With("version", version).Info("starting")
ctx, closer := context.WithCancel(context.Background())
errCh := make(chan error, 2)
// TODO(gianarb): I moved this up because we need to be sure that both
// connection, the one used for the resources and the one used for
// listening to events and notification are coming in the same way.
// BUT we should be using the right flags
connInfo := fmt.Sprintf("dbname=%s user=%s password=%s sslmode=%s",
os.Getenv("PGDATABASE"),
os.Getenv("PGUSER"),
os.Getenv("PGPASSWORD"),
os.Getenv("PGSSLMODE"),
)
dbCon, err := sql.Open("postgres", connInfo)
if err != nil {
logger.Fatal(err)
}
tinkDB := db.Connect(dbCon, logger)
if onlyMigration {
logger.Info("Applying migrations. This process will exit once the migrations have been applied.")
numAppliedMigrations, err := tinkDB.Migrate()
if err != nil {
logger.Fatal(err)
}
logger.With("num_applied_migrations", numAppliedMigrations).Info("Migrations applied successfully")
os.Exit(0)
}
if err := listener.Init(connInfo); err != nil {
logger.Fatal(err)
}
go tinkDB.PurgeEvents(errCh)
numAvailableMigrations, err := tinkDB.CheckRequiredMigrations()
if err != nil {
logger.Fatal(err)
}
if numAvailableMigrations != 0 {
logger.Info("Your database schema is not up to date. Please apply migrations by running tink-server with the env var ONLY_MIGRATION set.")
}
tlsCert, certPEM, modT, err := getCerts(caCertFile, tlsCertFile, tlsKeyFile)
if err != nil {
logger.Fatal(err)
}
rpcServer.SetupGRPC(ctx, logger, facility, tinkDB, certPEM, tlsCert, modT, errCh)
httpServer.SetupHTTP(ctx, logger, certPEM, modT, errCh)
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
select {
case err = <-errCh:
logger.Fatal(err)
case sig := <-sigs:
logger.With("signal", sig.String()).Info("signal received, stopping servers")
}
closer()
// wait for grpc server to shutdown
err = <-errCh
if err != nil {
logger.Fatal(err)
}
err = <-errCh
if err != nil {
logger.Fatal(err)
}
},
}
must := func(err error) {
if err != nil {
logger.Fatal(err)
}
}
rootCmd.Flags().String("facility", "", "Facility")
rootCmd.Flags().String("ca-cert", "", "File containing the ca certificate")
rootCmd.Flags().String("tls-cert", "bundle.pem", "File containing the tls certificate")
must(rootCmd.MarkFlagRequired("tls-cert"))
rootCmd.Flags().String("tls-key", "server-key.pem", "File containing the tls private key")
must(rootCmd.MarkFlagRequired("tls-key"))
rootCmd.Flags().Bool("only-migration", false, "only run database migrations")
return rootCmd
}
// createViper creates a Viper object configured to read in configuration files
// (from various paths with content type specific filename extensions) and loads
// environment variables.
func createViper(logger log.Logger) (*viper.Viper, error) {
v := viper.New()
v.AutomaticEnv()
v.SetConfigName("tink-server")
v.AddConfigPath("/etc/tinkerbell")
v.AddConfigPath(".")
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
// If a config file is found, read it in.
if err := v.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
logger.With("configFile", v.ConfigFileUsed()).Error(err, "could not load config file")
return nil, err
}
logger.Info("no config file found")
} else {
logger.With("configFile", v.ConfigFileUsed()).Info("loaded config file")
}
return v, nil
}
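// Illustrative example (an assumption, not shipped with this file): a minimal
// /etc/tinkerbell/tink-server.yaml that createViper would load, where each key mirrors
// a CLI flag and can equally be supplied as an env var (dashes become underscores,
// e.g. TLS_CERT):
//
//	facility: onprem
//	ca-cert: /certs/ca.pem
//	tls-cert: /certs/bundle.pem
//	tls-key: /certs/server-key.pem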
func applyViper(v *viper.Viper, cmd *cobra.Command) error {
errors := []error{}
cmd.Flags().VisitAll(func(f *pflag.Flag) {
if !f.Changed && v.IsSet(f.Name) {
val := v.Get(f.Name)
if err := cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val)); err != nil {
errors = append(errors, err)
return
}
}
})
if len(errors) > 0 {
errs := []string{}
for _, err := range errors {
errs = append(errs, err.Error())
}
return fmt.Errorf(strings.Join(errs, ", "))
}
return nil
}
func getCerts(caPath, certPath, keyPath string) (tls.Certificate, []byte, time.Time, error) {
var (
modT time.Time
caCertBytes []byte
)
if caPath != "" {
ca, modified, err := readFromFile(caPath)
if err != nil {
return tls.Certificate{}, nil, modT, fmt.Errorf("failed to read ca cert: %w", err)
}
if modified.After(modT) {
modT = modified
}
caCertBytes = ca
}
tlsCertBytes, modified, err := readFromFile(certPath)
if err != nil {
return tls.Certificate{}, tlsCertBytes, modT, fmt.Errorf("failed to read tls cert: %w", err)
}
if modified.After(modT) {
modT = modified
}
tlsKeyBytes, modified, err := readFromFile(keyPath)
if err != nil {
return tls.Certificate{}, tlsCertBytes, modT, fmt.Errorf("failed to read tls key: %w", err)
}
if modified.After(modT) {
modT = modified
}
// If we read in a separate ca certificate, concatenate it with the tls cert
if len(caCertBytes) > 0 {
tlsCertBytes = append(tlsCertBytes, caCertBytes...)
}
cert, err := tls.X509KeyPair(tlsCertBytes, tlsKeyBytes)
if err != nil {
return cert, tlsCertBytes, modT, fmt.Errorf("failed to ingest TLS files: %w", err)
}
return cert, tlsCertBytes, modT, nil
}
func readFromFile(filePath string) ([]byte, time.Time, error) {
var modified time.Time
f, err := os.Open(filePath)
if err != nil {
return nil, modified, err
}
stat, err := f.Stat()
if err != nil {
return nil, modified, err
}
modified = stat.ModTime()
contents, err := ioutil.ReadAll(f)
if err != nil {
return nil, modified, err
}
return contents, modified, nil
}
| [
"\"PGDATABASE\"",
"\"PGUSER\"",
"\"PGPASSWORD\"",
"\"PGSSLMODE\""
]
| []
| [
"PGUSER",
"PGSSLMODE",
"PGPASSWORD",
"PGDATABASE"
]
| [] | ["PGUSER", "PGSSLMODE", "PGPASSWORD", "PGDATABASE"] | go | 4 | 0 | |
src/carousel/wsgi.py | """
WSGI config for carousel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "carousel.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
benchmarks/pipe.py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
import logging
import math
import os
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule, pipe
from fairscale.optim import GradScaler
from fairscale.optim.oss import OSS
from fairscale.utils.testing import dist_init, get_worker_map
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
PIPE_CHUNKS = 2
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
def __init__(self, ninp, nhead, nhid, dropout):
super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
global iteration_count
iteration_count += 1
# if iteration_count == 196:
# dump_cuda_tensors()
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLMSequential(nn.Sequential):
"""A small language model based on the design of GPT-2 using nn.Sequential
for compatibility with Pipe"""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLMSequential, self).__init__(*layers)
def get_data(device):
with warnings.catch_warnings(record=True) as _caught_warnings:
TEXT = torchtext.data.Field(
tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
ntokens = len(TEXT.vocab.stoi)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size, TEXT, device)
val_data = batchify(val_txt, eval_batch_size, TEXT, device)
test_data = batchify(test_txt, eval_batch_size, TEXT, device)
return ntokens, train_data, val_data, test_data
def batchify(data, bsz, TEXT, device):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
if args.lazy_construction:
layers = [
LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),
LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))
model = layers
else:
model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
if args.ddp_zero:
return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)
else:
return Adam(model.parameters(), lr=lr)
optimizer = make_adam
scaler = GradScaler()
return model, criterion, optimizer, scaler
def get_tensors_by_size_bucket():
from collections import defaultdict
import gc
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
def dump_size_buckets(size_buckets, prefix=""):
from functools import reduce
import operator
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(prefix + f"{key} : {value}, {this}")
print(prefix + f"total = {total}")
last_size_buckets = None
once = True
def safe_rank():
try:
return torch.distributed.get_rank()
except AssertionError:
return 0
def check_size_buckets():
global last_size_buckets
global once
size_buckets = get_tensors_by_size_bucket()
if last_size_buckets is not None:
if size_buckets != last_size_buckets:
print(f"difference in outstanding tensors: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
if once:
print(f"dumping buckets for: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
once = False
else:
print(f"size buckets none on {safe_rank()}")
last_size_buckets = size_buckets
def dump_cuda_tensors():
print(f"dumping cuda tensors...")
from collections import defaultdict
from functools import reduce
import gc
import operator
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
print(f"outstanding cuda tensors:")
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(f"{key} : {value}, {this}")
print(f"total size = {total}")
import pprint
pprint.pprint(torch.cuda.memory_stats())
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
from functools import reduce
import operator
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if model.group:
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
logging.info(
f"training model, #params = {num_params}, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
logging.info(f"total #params = {total.item()}")
else:
logging.info(f"training model, #params = {num_params}")
vocab_size = 10000 # FIXME
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model)
def get_first_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[0]
else:
return torch.cuda.current_device()
def get_last_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[-1]
else:
return torch.cuda.current_device()
pipe_group = model.group
if args.ddp_zero:
model = DDP(
model,
device_ids=[torch.cuda.current_device()],
process_group=get_data_parallel_group(),
find_unused_parameters=False,
)
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
thing = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return thing
def __len__(self):
return len(lm_dataloader)
lm_dataloader = FakeDataset()
for i, batch in enumerate(lm_dataloader):
bi = batch["input"]
if args.max_batch and i > args.max_batch:
break
optimizer.zero_grad()
try:
if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:
tmp = batch["input"].to(get_first_device(model))
output = model(tmp)
else:
output = model(batch["input"])
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = batch["target"].to(get_last_device(model))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
if args.ddp_zero:
ddp_group = get_data_parallel_group()
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)
loss /= ddp_group.size()
loss.backward()
del target
else:
if args.ddp_zero:
model.module.back_helper(output)
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
word_counter += batch["ntokens"]
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
word_counter = 0
total_loss = 0
start_time = time.time()
# if i >= 10:
# break
# torch.cuda.empty_cache()
# check_size_buckets()
def evaluate(eval_model, data_source, criterion, bptt, ntokens):
eval_model.eval()
total_loss = 0.0
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
epoch = 1
bptt = 35
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
epoch_start_time = time.time()
train(train_data, model, criterion, optimizer, ntokens, args)
val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)
print("-" * 89)
print(
"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
epoch, (time.time() - epoch_start_time), val_loss
)
)
print("-" * 110)
elapsed_time = time.time() - start_time
nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
wps = nwords / elapsed_time
test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)
print("=" * 89)
print(
"| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
test_loss, elapsed_time, nwords, wps
)
)
print("=" * 110)
if can_benchmark and len(model.balance) == 4:
# Assert that words per second is within 3 standard deviations of the average
# of six golden runs
assert wps > 36954.4 - (3 * 116.825)
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:1: {:1d}".format(torch.cuda.memory_stats(1)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:2: {:1d}".format(torch.cuda.memory_stats(2)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:3: {:1d}".format(torch.cuda.memory_stats(3)["allocated_bytes.all.peak"]))
# Assert that memory usage on each GPU is within 10% of golden run
# Right-hand-side is golden run bytes * 110%
assert torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] < 4061909504 * 1.1
assert torch.cuda.memory_stats(1)["allocated_bytes.all.peak"] < 4050944 * 1.1
assert torch.cuda.memory_stats(2)["allocated_bytes.all.peak"] < 10427392 * 1.1
assert torch.cuda.memory_stats(3)["allocated_bytes.all.peak"] < 2031824896 * 1.1
print("No regression detected")
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
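# Worked example (illustrative): generate_balance(4, 10) assigns layers greedily with a
# ceiling on the remaining per-device share, giving [3, 3, 2, 2]; the weighted variant
# above shrinks the last partition, e.g. generate_balance_weighted(4, 12, 0.5) -> [4, 4, 3, 1].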
def make_model_and_data(args, device, new_data: bool = True):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if new_data:
vocab_size = 10000
model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
else:
data = get_data(device)
ntokens, train_data, val_data, test_data = data
model, criterion, optimizer, scaler = make_model(args, device, ntokens)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": data,
}
def bench_single_process(args):
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
init_random_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance(min(num_devices, 4), len(model))
p = pipe.Pipe(
model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint
)
del model
del blob["model"]
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_mp_worker(args, available_workers):
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)
p = pipe.Pipe(
model,
balance,
style=Pipe.AsyncSchedule,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
pipelined_backward=args.pipelined_backward,
checkpoint=args.checkpoint,
# loss_fn=blob["criterion"],
)
if torch.cuda.is_available():
p = p.cuda()
if args.all_at_once and p.pipeline:
print(f"running all at once")
p.pipeline.all_at_once = True
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def bench_multi_process(args, all_at_once=False):
if args.local_world_size != 0:
world_size = args.local_world_size
else:
world_size = min(torch.cuda.device_count(), 2)
mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)
best_device_map = {
0: "mlx5_0:1",
1: "mlx5_0:1",
2: "mlx5_1:1",
3: "mlx5_1:1",
4: "mlx5_2:1",
5: "mlx5_2:1",
6: "mlx5_3:1",
7: "mlx5_3:1",
}
def bench_mpi(args):
guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
os.environ["UCX_NET_DEVICES"] = best_device_map[local_rank]
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10638"
if args.socket_name:
os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
os.environ["TP_SOCKET_IFNAME"] = args.socket_name
torch.distributed.init_process_group(backend="gloo", rank=guess_rank, world_size=world_size)
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank % torch.cuda.device_count())
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),
)
backends = {"model_parallel_backend": "nccl", "pipeline_backend": "mpi", "ddp_backend": "nccl"}
if args.ddp_zero:
initialize_model_parallel(1, 4, **backends)
else:
initialize_model_parallel(1, world_size, **backends)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
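# Illustrative launch (an assumption): bench_mpi expects to run under Open MPI so the
# OMPI_COMM_WORLD_* variables are populated, e.g. roughly
#     mpirun -np 8 python benchmarks/pipe.py --host <rank0-host> --chunks 4
# The exact flag values and host name here are hypothetical.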
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument("--ddp-zero", action="store_true", default=False, help="enable ddp")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Defer construction of model layers until they are assigned to a pipeline partition"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument(
"--pipelined-backward", dest="pipelined_backward", action="store_true", help="Pipelined backward pass"
)
parser.add_argument(
"--no-pipelined-backward", dest="pipelined_backward", action="store_false", help="Pipelined backward pass"
)
parser.set_defaults(pipelined_backward=True)
if __name__ == "__main__":
args = parser.parse_args()
# bench_multi_process(args, all_at_once=True)
if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
print(f"Running benchmark with args: {args}")
bench_single_process(args)
else:
if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
print(f"Running benchmark with args: {args}")
bench_mpi(args)
| []
| []
| [
"MASTER_ADDR",
"MASTER_PORT",
"TP_SOCKET_IFNAME",
"UCX_NET_DEVICES",
"OMPI_COMM_WORLD_LOCAL_RANK",
"OMPI_COMM_WORLD_SIZE",
"OMPI_COMM_WORLD_RANK",
"GLOO_SOCKET_IFNAME"
]
| [] | ["MASTER_ADDR", "MASTER_PORT", "TP_SOCKET_IFNAME", "UCX_NET_DEVICES", "OMPI_COMM_WORLD_LOCAL_RANK", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK", "GLOO_SOCKET_IFNAME"] | python | 8 | 0 | |
webdriver/tests/support/fixtures.py | import copy
import json
import os
import asyncio
import pytest
import webdriver
from urllib.parse import urlunsplit
from tests.support import defaults
from tests.support.helpers import cleanup_session, deep_update
from tests.support.inline import build_inline
from tests.support.http_request import HTTPRequest
from tests.support.sync import Poll
_current_session = None
_custom_session = False
def pytest_configure(config):
# register the capabilities marker
config.addinivalue_line("markers",
"capabilities: mark test to use capabilities")
@pytest.fixture
def capabilities():
"""Default capabilities to use for a new WebDriver session."""
return {}
def pytest_generate_tests(metafunc):
if "capabilities" in metafunc.fixturenames:
marker = metafunc.definition.get_closest_marker(name="capabilities")
if marker:
metafunc.parametrize("capabilities", marker.args, ids=None)
# Ensure that the event loop is restarted once per session rather than the default of once per test
# if we don't do this, tests will try to reuse a closed event loop and fail with an error that the "future
# belongs to a different loop"
@pytest.fixture(scope="session")
def event_loop():
"""Change event_loop fixture to session level."""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture
def add_event_listeners(session):
"""Register listeners for tracked events on element."""
def add_event_listeners(element, tracked_events):
element.session.execute_script("""
let element = arguments[0];
let trackedEvents = arguments[1];
if (!("events" in window)) {
window.events = [];
}
for (var i = 0; i < trackedEvents.length; i++) {
element.addEventListener(trackedEvents[i], function (event) {
window.events.push(event.type);
});
}
""", args=(element, tracked_events))
return add_event_listeners
@pytest.fixture
def create_cookie(session, url):
"""Create a cookie"""
def create_cookie(name, value, **kwargs):
if kwargs.get("path", None) is not None:
session.url = url(kwargs["path"])
session.set_cookie(name, value, **kwargs)
return session.cookies(name)
return create_cookie
@pytest.fixture
def create_frame(session):
"""Create an `iframe` element in the current browsing context and insert it
into the document. Return a reference to the newly-created element."""
def create_frame():
append = """
var frame = document.createElement('iframe');
document.body.appendChild(frame);
return frame;
"""
return session.execute_script(append)
return create_frame
@pytest.fixture
def http(configuration):
return HTTPRequest(configuration["host"], configuration["port"])
@pytest.fixture
def server_config():
with open(os.environ.get("WD_SERVER_CONFIG_FILE"), "r") as f:
return json.load(f)
@pytest.fixture(scope="session")
def configuration():
host = os.environ.get("WD_HOST", defaults.DRIVER_HOST)
port = int(os.environ.get("WD_PORT", str(defaults.DRIVER_PORT)))
capabilities = json.loads(os.environ.get("WD_CAPABILITIES", "{}"))
return {
"host": host,
"port": port,
"capabilities": capabilities
}
async def reset_current_session_if_necessary(caps, request_bidi):
global _current_session
# If there is a session with different capabilities active or the current session
# is of different type than the one we would like to create, end it now.
if _current_session is not None:
is_bidi = isinstance(_current_session, webdriver.BidiSession)
if is_bidi != request_bidi or not _current_session.match(caps):
if is_bidi:
await _current_session.end()
else:
_current_session.end()
_current_session = None
@pytest.fixture(scope="function")
async def session(capabilities, configuration, request):
"""Create and start a session for a test that does not itself test session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test."""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps, False)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps)
try:
_current_session.start()
except webdriver.error.SessionNotCreatedException:
if not _current_session.session_id:
raise
# Enforce a fixed default window size and position
if _current_session.capabilities.get("setWindowRect"):
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session
cleanup_session(_current_session)
@pytest.fixture(scope="function")
async def bidi_session(capabilities, configuration, request):
"""Create and start a bidi session for a test that does not itself test
bidi session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test."""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps, True)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps,
enable_bidi=True)
try:
_current_session.start()
await _current_session.bidi_session.start()
except webdriver.error.SessionNotCreatedException:
if not _current_session.session_id:
raise
# Enforce a fixed default window size and position
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session.bidi_session
await _current_session.bidi_session.end()
cleanup_session(_current_session)
@pytest.fixture(scope="function")
def current_session():
return _current_session
@pytest.fixture
def url(server_config):
def url(path, protocol="http", domain="", subdomain="", query="", fragment=""):
domain = server_config["domains"][domain][subdomain]
port = server_config["ports"][protocol][0]
host = "{0}:{1}".format(domain, port)
return urlunsplit((protocol, host, path, query, fragment))
return url
@pytest.fixture
def create_dialog(session):
"""Create a dialog (one of "alert", "prompt", or "confirm") and provide a
function to validate that the dialog has been "handled" (either accepted or
dismissed) by returning some value."""
def create_dialog(dialog_type, text=None):
assert dialog_type in ("alert", "confirm", "prompt"), (
"Invalid dialog type: '%s'" % dialog_type)
if text is None:
text = ""
assert isinstance(text, str), "`text` parameter must be a string"
# Script completes itself when the user prompt has been opened.
# For prompt() dialogs, add a value for the 'default' argument,
# as some user agents (IE, for example) do not produce consistent
# values for the default.
session.execute_async_script("""
let dialog_type = arguments[0];
let text = arguments[1];
setTimeout(function() {
if (dialog_type == 'prompt') {
window.dialog_return_value = window[dialog_type](text, '');
} else {
window.dialog_return_value = window[dialog_type](text);
}
}, 0);
""", args=(dialog_type, text))
wait = Poll(
session,
timeout=15,
ignored_exceptions=webdriver.NoSuchAlertException,
message="No user prompt with text '{}' detected".format(text))
wait.until(lambda s: s.alert.text == text)
return create_dialog
@pytest.fixture
def closed_frame(session, url):
original_handle = session.window_handle
new_handle = session.new_window()
session.window_handle = new_handle
session.url = url("/webdriver/tests/support/html/frames.html")
subframe = session.find.css("#sub-frame", all=False)
session.switch_frame(subframe)
deleteframe = session.find.css("#delete-frame", all=False)
session.switch_frame(deleteframe)
button = session.find.css("#remove-parent", all=False)
button.click()
yield
session.window.close()
assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)
session.window_handle = original_handle
@pytest.fixture
def closed_window(session, inline):
original_handle = session.window_handle
new_handle = session.new_window()
session.window_handle = new_handle
session.url = inline("<input id='a' value='b'>")
element = session.find.css("input", all=False)
session.window.close()
assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)
yield (original_handle, element)
session.window_handle = original_handle
@pytest.fixture
def inline(url):
"""Takes a source extract and produces well-formed documents.
Based on the desired document type, the extract is embedded with
predefined boilerplate in order to produce well-formed documents.
The media type and character set may also be individually configured.
This helper function originally used data URLs, but since these
are not universally supported (or indeed standardised!) across
browsers, it now delegates the serving of the document to wptserve.
This file also acts as a wptserve handler (see the main function
below) which configures the HTTP response using query parameters.
This function returns a URL to the wptserve handler, which in turn
will serve an HTTP response with the requested source extract
inlined in a well-formed document, and the Content-Type header
optionally configured using the desired media type and character set.
Any additional keyword arguments are passed on to the build_url
function, which comes from the url fixture.
"""
def inline(src, **kwargs):
return build_inline(url, src, **kwargs)
return inline
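# Illustrative usage (a sketch, not one of the existing tests):
#
#     def test_document_title(session, inline):
#         session.url = inline("<title>hello</title><p>body")
#         assert session.title == "hello"
#
# `session.title` is assumed here purely for illustration.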
@pytest.fixture
def iframe(inline):
"""Inlines document extract as the source document of an <iframe>."""
def iframe(src, **kwargs):
return "<iframe src='{}'></iframe>".format(inline(src, **kwargs))
return iframe
| []
| []
| [
"WD_HOST",
"WD_CAPABILITIES",
"WD_PORT",
"WD_SERVER_CONFIG_FILE"
]
| [] | ["WD_HOST", "WD_CAPABILITIES", "WD_PORT", "WD_SERVER_CONFIG_FILE"] | python | 4 | 0 | |
leaderboard/handler.go | package function
import (
"database/sql"
"encoding/json"
"log"
"net/http"
"os"
_ "github.com/lib/pq"
"github.com/pkg/errors"
"github.com/openfaas/openfaas-cloud/sdk"
)
var db *sql.DB
var cors string
// init establishes a persistent connection to the remote database
// the function will panic if it cannot establish a link and the
// container will restart / go into a crash/back-off loop
func init() {
password, _ := sdk.ReadSecret("password")
user, _ := sdk.ReadSecret("username")
host, _ := sdk.ReadSecret("host")
dbName := os.Getenv("postgres_db")
port := os.Getenv("postgres_port")
sslmode := os.Getenv("postgres_sslmode")
connStr := "postgres://" + user + ":" + password + "@" + host + ":" + port + "/" + dbName + "?sslmode=" + sslmode
var err error
db, err = sql.Open("postgres", connStr)
if err != nil {
panic(err.Error())
}
err = db.Ping()
if err != nil {
panic(err.Error())
}
if val, ok := os.LookupEnv("allow_cors"); ok && len(val) > 0 {
cors = val
}
}
// Handle responds to an HTTP request with the current leaderboard encoded as JSON.
func Handle(w http.ResponseWriter, r *http.Request) {
rows, getErr := db.Query(`select * from get_leaderboard();`)
if getErr != nil {
log.Printf("get error: %s", getErr.Error())
http.Error(w, errors.Wrap(getErr, "unable to get from leaderboard").Error(),
http.StatusInternalServerError)
return
}
results := []Result{}
defer rows.Close()
for rows.Next() {
result := Result{}
scanErr := rows.Scan(&result.UserID, &result.UserLogin, &result.IssueComments, &result.IssuesCreated)
if scanErr != nil {
log.Println("scan err:", scanErr)
}
results = append(results, result)
}
if len(cors) > 0 {
w.Header().Set("Access-Control-Allow-Origin", cors)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
res, _ := json.Marshal(results)
w.Write(res)
}
type Result struct {
UserID int
UserLogin string
IssueComments int
IssuesCreated int
}
| [
"\"postgres_db\"",
"\"postgres_port\"",
"\"postgres_sslmode\""
]
| []
| [
"postgres_db",
"postgres_port",
"postgres_sslmode"
]
| [] | ["postgres_db", "postgres_port", "postgres_sslmode"] | go | 3 | 0 | |
rest/keys/list-post-example/list-post-example.6.x.py | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
key = client.new_keys \
.create(friendly_name="User Joey")
print(key.sid)
print(key.secret)
| []
| []
| [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | python | 2 | 0 | |
cmd/apps/kubernetes_exec.go | package apps
import (
"fmt"
"log"
"os"
"path"
"strings"
execute "github.com/alexellis/go-execute/pkg/v1"
"github.com/alexellis/k3sup/pkg/env"
)
func fetchChart(path, chart string, helm3 bool) error {
subdir := ""
if helm3 {
subdir = "helm3"
}
mkErr := os.MkdirAll(path, 0700)
if mkErr != nil {
return mkErr
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s fetch %s --untar --untardir %s", env.LocalBinary("helm", subdir), chart, path),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func getNodeArchitecture() string {
res, _ := kubectlTask("get", "nodes", `--output`, `jsonpath={range $.items[0]}{.status.nodeInfo.architecture}`)
arch := strings.TrimSpace(string(res.Stdout))
return arch
}
func helm3Upgrade(basePath, chart, namespace, values string, overrides map[string]string, wait bool) error {
chartName := chart
if index := strings.Index(chartName, "/"); index > -1 {
chartName = chartName[index+1:]
}
chartRoot := basePath
args := []string{"upgrade", "--install", chartName, chart, "--namespace", namespace}
fmt.Println("VALUES", values)
if len(values) > 0 {
args = append(args, "--values")
if !strings.HasPrefix(values, "/") {
args = append(args, path.Join(chartRoot, values))
} else {
args = append(args, values)
}
}
for k, v := range overrides {
args = append(args, "--set")
args = append(args, fmt.Sprintf("%s=%s", k, v))
}
task := execute.ExecTask{
Command: env.LocalBinary("helm", "helm3"),
Args: args,
Env: os.Environ(),
Cwd: basePath,
StreamStdio: true,
}
fmt.Printf("Command: %s %s\n", task.Command, task.Args)
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d, stderr: %s", res.ExitCode, res.Stderr)
}
if len(res.Stderr) > 0 {
log.Printf("stderr: %s\n", res.Stderr)
}
return nil
}
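// Illustrative invocation (an assumption; chart and override names are hypothetical):
// for chart "openfaas/openfaas", namespace "openfaas", values "values.yaml" and an
// override gateway.replicas=2, the locally fetched helm3 binary is run roughly as
//
//	helm upgrade --install openfaas openfaas/openfaas --namespace openfaas \
//	    --values <basePath>/values.yaml --set gateway.replicas=2
//
// i.e. the chart name after its repo prefix becomes the release name, and a relative
// values file is resolved against basePath.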
func templateChart(basePath, chart, namespace, outputPath, values string, overrides map[string]string) error {
rmErr := os.RemoveAll(outputPath)
if rmErr != nil {
log.Printf("Error cleaning up: %s, %s\n", outputPath, rmErr.Error())
}
mkErr := os.MkdirAll(outputPath, 0700)
if mkErr != nil {
return mkErr
}
overridesStr := ""
for k, v := range overrides {
overridesStr += fmt.Sprintf(" --set %s=%s", k, v)
}
chartRoot := path.Join(basePath, chart)
valuesStr := ""
if len(values) > 0 {
valuesStr = "--values " + path.Join(chartRoot, values)
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s template %s --name %s --namespace %s --output-dir %s %s %s",
env.LocalBinary("helm", ""), chart, chart, namespace, outputPath, valuesStr, overridesStr),
Env: os.Environ(),
Cwd: basePath,
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d, stderr: %s", res.ExitCode, res.Stderr)
}
if len(res.Stderr) > 0 {
log.Printf("stderr: %s\n", res.Stderr)
}
return nil
}
func addHelmRepo(name, url string, helm3 bool) error {
subdir := ""
if helm3 {
subdir = "helm3"
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s repo add %s %s", env.LocalBinary("helm", subdir), name, url),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func updateHelmRepos(helm3 bool) error {
subdir := ""
if helm3 {
subdir = "helm3"
}
task := execute.ExecTask{
Command: fmt.Sprintf("%s repo update", env.LocalBinary("helm", subdir)),
Env: os.Environ(),
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("exit code %d", res.ExitCode)
}
return nil
}
func kubectlTask(parts ...string) (execute.ExecResult, error) {
task := execute.ExecTask{
Command: "kubectl",
Args: parts,
StreamStdio: false,
}
res, err := task.Execute()
return res, err
}
func kubectl(parts ...string) error {
task := execute.ExecTask{
Command: "kubectl",
Args: parts,
StreamStdio: true,
}
res, err := task.Execute()
if err != nil {
return err
}
if res.ExitCode != 0 {
return fmt.Errorf("kubectl exit code %d, stderr: %s",
res.ExitCode,
res.Stderr)
}
return nil
}
func getDefaultKubeconfig() string {
kubeConfigPath := path.Join(os.Getenv("HOME"), ".kube/config")
if val, ok := os.LookupEnv("KUBECONFIG"); ok {
kubeConfigPath = val
}
return kubeConfigPath
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
core/api/app.go | package api
import (
"context"
"core/app"
"core/collaborator"
appleError "core/error"
"core/integration"
"core/invitation"
nodezapLog "core/loggers"
"core/socket"
tabledata "core/table_data"
"core/user"
userpreference "core/user_preference"
widgetcomment "core/widget_comment"
widgetdata "core/widget_data"
"core/workspace"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"reflect"
"strconv"
"strings"
"time"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/go-ozzo/ozzo-validation/v4/is"
"github.com/go-redis/redis/v8"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type internalGetAppRequest struct {
AppID string `json:"appID,omitempty"`
WorkspaceID string `json:"workspaceID,omitempty"`
}
//CreateAppRequest create app
type CreateAppRequest struct {
ID string `json:"id,omitempty"`
DisplayName *string `json:"displayName"`
Icon app.Icon `json:"icon"`
}
//UpdateAppRequest update app
type UpdateAppRequest struct {
DisplayName *string `json:"displayName"`
Icon app.Icon `json:"icon"`
Settings *app.Settings `json:"settings"`
}
type IntegrationMetadata struct {
ControlID string `json:"controlID"`
FlowID string `json:"flowID"`
BlockID string `json:"blockID"`
IntegrationID string `json:"integrationID"`
}
type IntegrationsMapping struct {
DevIntegrationId string `json:"devIntegrationId"`
ProdIntegrationId string `json:"prodIntegrationId"`
}
//PublishAppRequest publish app
type PublishAppRequest struct {
ProdEnvironment map[string]string `json:"prodEnvironment"`
IntegrationsMapping []IntegrationsMapping `json:"integrationsMapping"`
}
type PublishAppConfigurationResponse struct {
DevEnvironment map[string]string `json:"devEnvironment"`
ProdEnvironment map[string]string `json:"prodEnvironment"`
IntegrationsMapping []app.IntegrationMapping `json:"integrationsMapping"`
}
type getAppsRequest struct{}
//CollaboratorMetadata collaborator
type CollaboratorMetadata struct {
UserID string `json:"userID,omitempty"`
RoleID string `json:"roleID,omitempty"`
UpdateTime time.Time `json:"updateTime,omitempty"`
CreateTime time.Time `json:"createTime,omitempty"`
}
//GetAppsResponse get apps
type GetAppsResponse struct {
ID string `json:"id,omitempty"`
DisplayName string `json:"displayName,omitempty"`
Icon app.Icon `json:"icon,omitempty"`
WorkspaceID string `json:"workspaceID,omitempty"`
Settings app.Settings `json:"settings,omitempty"`
Collaborators map[string]CollaboratorMetadata `json:"collaborators,omitempty"`
Status app.AppStatus `json:"status"`
UpdateTime time.Time `json:"updateTime,omitempty"`
CreateTime time.Time `json:"createTime,omitempty"`
}
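// appProjections returns the projection of app fields included in API list and get responses.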
func appProjections() bson.M {
return bson.M{
"displayName": 1,
"icon": 1,
"workspaceID": 1,
"settings": 1,
"status": 1,
"updateTime": 1,
"createTime": 1,
}
}
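// getApp loads an app by ID and maps a missing document to an InvalidApp error.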
func getApp(ctx context.Context, mongoDb *mongo.Database,
appID string, options *options.FindOneOptions) (*app.App, error) {
store := app.NewStore(ctx, mongoDb)
app, err := store.Get(appID, options)
if err != nil {
if err == mongo.ErrNoDocuments {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidAppErrorCode), appleError.AppError, appleError.InvalidApp)
}
return nil, err
}
return app, nil
}
/*********************************START GET APP********************************/
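// getAppResponse builds the API representation of an app together with the requesting
// user's collaborator metadata.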
func getAppResponse(app *app.App, collaborator *collaborator.Collaborator) GetAppsResponse {
collaboratorMetadata := CollaboratorMetadata{
UserID: collaborator.UserID,
RoleID: collaborator.RoleID,
UpdateTime: collaborator.UpdateTime,
CreateTime: collaborator.CreateTime,
}
return GetAppsResponse{
ID: app.ID,
DisplayName: app.DisplayName,
Icon: app.Icon,
WorkspaceID: app.WorkspaceID,
Settings: app.Settings,
Collaborators: map[string]CollaboratorMetadata{
collaboratorMetadata.UserID: collaboratorMetadata,
},
Status: app.Status,
CreateTime: app.CreateTime,
UpdateTime: app.UpdateTime,
}
}
func (client *Client) getApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
var appRequest internalGetAppRequest
appRequest.AppID = mux.Vars(r)["appID"]
appRequest.WorkspaceID = mux.Vars(r)["workspaceID"]
err := client.Security.GetApp(appRequest.WorkspaceID, appRequest.AppID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
projections := appProjections()
store := app.NewStore(ctx, database)
app, err := store.Get(appRequest.AppID, options.FindOne().SetProjection(projections))
if err != nil {
if err == mongo.ErrNoDocuments {
json.NewEncoder(w).Encode(*NewResponse(ERROR, nil, client.getRequestID(),
appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidAppErrorCode), appleError.AppError, appleError.InvalidApp), client.LogEntry))
return
}
json.NewEncoder(w).Encode(*NewResponse(ERROR, nil, client.getRequestID(), err, client.LogEntry))
return
}
collaborator, err := getCollaborator(ctx, database, requestUser.UID, appRequest.AppID)
if err != nil {
json.NewEncoder(w).Encode(*NewResponse(ERROR, nil, client.getRequestID(), err, client.LogEntry))
return
}
response := getAppResponse(app, collaborator)
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, appRequest, response, err)
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/*********************************END GET APP**********************************/
/*********************************START CREATE APP*****************************/
func (appRequest *CreateAppRequest) validate() error {
err := validation.ValidateStruct(appRequest,
validation.Field(&appRequest.ID, validation.Required.Error("Id is required"),
is.MongoID.Error("Id must be a mongo objectId")),
validation.Field(&appRequest.DisplayName, validation.Required.Error("name is required"),
TrimmedLengthCheck("Name", 1, 64)),
validation.Field(&appRequest.Icon, validation.Required.Error("Icon is required")))
if err != nil {
return appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.RequestValidationErrorCode), appleError.AppError, getValidatorErrorString(err.Error()))
}
	appIcon := &appRequest.Icon
err = validation.ValidateStruct(appIcon,
validation.Field(&appIcon.Type, validation.Required.Error("Icon type is required"),
validation.In("EMOJI", "IMAGE").Error("Icon type is either EMOJI or IMAGE")),
validation.Field(&appIcon.Source, validation.Required.Error("Icon source is required"),
TrimmedLengthCheck("Icon source", 1, 2000)))
if err != nil {
return appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.RequestValidationErrorCode), appleError.AppError, getValidatorErrorString(err.Error()))
}
return nil
}
func (appRequest *CreateAppRequest) create(ctx context.Context,
database *mongo.Database, newApp *app.App) (*mongo.InsertOneResult, error) {
store := app.NewStore(ctx, database)
createAppResult, err := store.Create(newApp)
if err != nil {
if reflect.TypeOf(err).String() == "mongo.WriteException" &&
len(err.(mongo.WriteException).WriteErrors) > 0 &&
err.(mongo.WriteException).WriteErrors[0].Code == 11000 {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.AppExistsErrorCode), appleError.AppError, appleError.AppExists)
}
return nil, err
}
return createAppResult, nil
}
//create dev and prod app
func (appRequest *CreateAppRequest) createMultipleApps(ctx context.Context,
database *mongo.Database, newApp *app.App, devApp *app.App) (*mongo.InsertManyResult, error) {
store := app.NewStore(ctx, database)
createAppResult, err := store.CreateMany([]interface{}{newApp, devApp})
if err != nil {
if reflect.TypeOf(err).String() == "mongo.BulkWriteException" &&
len(err.(mongo.BulkWriteException).WriteErrors) > 0 &&
err.(mongo.BulkWriteException).WriteErrors[0].Code == 11000 {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.AppExistsErrorCode), appleError.AppError, appleError.AppExists)
}
return nil, err
}
return createAppResult, nil
}
//create appAdmin for dev and prod apps
func (appRequest *CreateAppRequest) createAppAdmin(ctx context.Context,
database *mongo.Database, userID, workspaceID string) (*collaborator.Collaborator, error) {
store := collaborator.NewStore(ctx, database)
newCollaborator := collaborator.New(userID, appRequest.ID, app.AppAdmin, workspaceID)
newDevAppCollaborator := collaborator.New(userID, app.GetDevId(appRequest.ID),
app.AppAdmin, workspaceID)
_, err := store.CreateMany([]interface{}{newCollaborator, newDevAppCollaborator})
if err != nil {
if reflect.TypeOf(err).String() == "mongo.BulkWriteException" &&
len(err.(mongo.BulkWriteException).WriteErrors) > 0 &&
err.(mongo.BulkWriteException).WriteErrors[0].Code == 11000 {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.CollaboratorExistErrorCode), appleError.AppError,
appleError.CollaboratorExists)
}
return nil, err
}
return newCollaborator, nil
}
func (client *Client) createApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
requestBody, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.InvalidPayloadErrorCode), client.getRequestID())
return
}
var appRequest CreateAppRequest
err = json.Unmarshal(requestBody, &appRequest)
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.UnMarshallingErrorCode), client.getRequestID())
return
}
err = appRequest.validate()
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
workspaceID := mux.Vars(r)["workspaceID"]
err = client.Security.CreateApp(requestUser.UID, workspaceID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
	session, err := client.MongoClient.StartSession()
	if err != nil {
		setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
		return
	}
	defer session.EndSession(ctx)
response, err := session.WithTransaction(ctx, func(sessionContext mongo.SessionContext) (interface{}, error) {
newApp := app.New(appRequest.ID, strings.TrimSpace(*appRequest.DisplayName),
app.Icon{Type: appRequest.Icon.Type, Source: strings.TrimSpace(appRequest.Icon.Source),
EmojiUnifiedCode: appRequest.Icon.EmojiUnifiedCode},
workspaceID, requestUser.UID, requestUser.UID, app.Unpublished, 1)
newDevApp := app.New(app.GetDevId(appRequest.ID), strings.TrimSpace(*appRequest.DisplayName),
app.Icon{Type: appRequest.Icon.Type, Source: strings.TrimSpace(appRequest.Icon.Source),
EmojiUnifiedCode: appRequest.Icon.EmojiUnifiedCode},
workspaceID, requestUser.UID, requestUser.UID, app.Unpublished, 1)
newApp.CreateApp(requestUser.UID)
err = newApp.Validate()
if err != nil {
return nil, err
}
newDevApp.CreateApp(requestUser.UID)
err = newDevApp.Validate()
if err != nil {
return nil, err
}
_, err = appRequest.createMultipleApps(ctx, database, newApp, newDevApp)
if err != nil {
return nil, err
}
appCollaborator, err := appRequest.createAppAdmin(ctx, database, requestUser.UID,
workspaceID)
if err != nil {
return nil, err
}
response := getAppResponse(newApp, appCollaborator)
return response, nil
})
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, appRequest, response, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/*********************************END CREATE APP*******************************/
/*********************************START UPDATE APP*****************************/
func (appRequest *UpdateAppRequest) validate() error {
err := validation.ValidateStruct(appRequest,
validation.Field(&appRequest.Settings, validation.NotNil.Error("Settings is required")),
validation.Field(&appRequest.DisplayName, validation.Required.Error("name is required"),
TrimmedLengthCheck("Name", 1, 64)),
validation.Field(&appRequest.Icon, validation.Required.Error("Icon is required")))
if err != nil {
return appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.RequestValidationErrorCode), appleError.AppError, getValidatorErrorString(err.Error()))
}
	appIcon := &appRequest.Icon
err = validation.ValidateStruct(appIcon,
validation.Field(&appIcon.Type, validation.Required.Error("Icon type is required"),
validation.In("EMOJI", "IMAGE").Error("Icon type is either EMOJI or IMAGE")),
validation.Field(&appIcon.Source, validation.Required.Error("Icon source is required"),
TrimmedLengthCheck("Icon source", 1, 2000)))
if err != nil {
return appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.RequestValidationErrorCode), appleError.AppError, getValidatorErrorString(err.Error()))
}
return nil
}
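// createUpdateBson builds the $set document used to update an app's display name, icon and settings.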
func (appRequest *UpdateAppRequest) createUpdateBson(userID string) bson.M {
updateBson := bson.M{
"displayName": strings.TrimSpace(*appRequest.DisplayName),
"icon": appRequest.Icon,
"settings": *appRequest.Settings,
"updaterID": userID,
"updateTime": time.Now().UTC(),
}
return bson.M{"$set": updateBson}
}
func (appRequest *UpdateAppRequest) update(ctx context.Context, database *mongo.Database,
appID, userID string, updateBson bson.M) (*mongo.UpdateResult, error) {
projections := bson.M{"_id": 1}
store := app.NewStore(ctx, database)
_, err := store.Get(appID, options.FindOne().SetProjection(projections))
if err != nil {
if err == mongo.ErrNoDocuments {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidAppErrorCode), appleError.AppError,
appleError.InvalidApp)
}
return nil, err
}
updateResult, err := store.Update(appID, updateBson)
if err != nil {
return nil, err
}
return updateResult, nil
}
func (client *Client) updateApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
requestBody, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.InvalidPayloadErrorCode), client.getRequestID())
return
}
var appRequest *UpdateAppRequest
err = json.Unmarshal(requestBody, &appRequest)
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.UnMarshallingErrorCode), client.getRequestID())
return
}
appID := mux.Vars(r)["appID"]
workspaceID := mux.Vars(r)["workspaceID"]
err = client.Security.UpdateApp(workspaceID, appID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
err = appRequest.validate()
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
updateBson := appRequest.createUpdateBson(requestUser.UID)
_, err = appRequest.update(ctx, database, appID, requestUser.UID, updateBson)
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, appRequest, appRequest, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
//notify
payloadOptions := socket.PayloadOptions{EventType: socket.UpdateApp, WorkspaceId: workspaceID,
AppId: appID, Data: appRequest, UserId: requestUser.UID}
socket.Notify(ctx, client.Redis, payloadOptions)
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, appRequest, client.getRequestID(), nil, client.LogEntry))
}
/*********************************END UPDATE APP*******************************/
/********************************START DELETE APP******************************/
/*
	Both the prod and the dev table data collections are deleted
*/
func getTableIDs(tables map[string]app.Table, devTables map[string]app.Table) []string {
tableIDs := []string{}
for tableID := range tables {
tableIDs = append(tableIDs, tableID)
}
for tableID := range devTables {
tableIDs = append(tableIDs, tableID)
}
return tableIDs
}
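// dropTableDataCollections drops both the dev and the prod data collection for every given table ID.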
func dropTableDataCollections(ctx context.Context,
mongoDb *mongo.Database, tableIDs []string) error {
store := tabledata.NewStore(ctx, mongoDb)
for _, tableID := range tableIDs {
err := store.DropCollection(app.GetDevId(tableID))
if err != nil {
return err
}
err = store.DropCollection(tableID)
if err != nil {
return err
}
}
return nil
}
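// removefavoriteApp removes the app from the favorite apps list in every user's preferences for the workspace.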
func removefavoriteApp(ctx context.Context, mongoDb *mongo.Database, appID,
workspaceID string) (*mongo.UpdateResult, error) {
userPreferenceStore := userpreference.NewStore(ctx, mongoDb)
updateUserBson := bson.M{"$pull": bson.M{"workspaces." + workspaceID + ".favoriteApps": appID}}
updatedUserPreference, err := userPreferenceStore.UpdateMany(bson.M{},
updateUserBson)
if err != nil {
return nil, err
}
return updatedUserPreference, nil
}
func removeAppCollaborators(ctx context.Context, database *mongo.Database, appID, devAppId string) (*mongo.DeleteResult, error) {
store := collaborator.NewStore(ctx, database)
filter := bson.M{"appID": bson.M{"$in": []interface{}{appID, devAppId}}}
deleteResult, err := store.DeleteMany(filter, options.Delete())
if err != nil {
return nil, err
}
return deleteResult, nil
}
func removeWorkspaceAppCollaborators(ctx context.Context,
database *mongo.Database, workspaceID string, appID string) (*mongo.UpdateResult, error) {
updateBson := bson.M{"$pull": bson.M{"appCollaborators": bson.M{"appID": appID}},
"$set": bson.M{"updateTime": time.Now().UTC()}}
store := workspace.NewStore(ctx, database)
updateResult, err := store.Update(workspaceID, updateBson)
if err != nil {
return nil, err
}
return updateResult, nil
}
func deleteWidgetData(ctx context.Context,
database *mongo.Database, appID, devAppID string) (*mongo.DeleteResult, error) {
store := widgetdata.NewStore(ctx, database)
filter := bson.M{"appID": bson.M{"$in": []interface{}{appID, devAppID}}}
deleteResult, err := store.DeleteMany(filter, options.Delete())
if err != nil {
return nil, err
}
return deleteResult, nil
}
func deleteWidgetComments(ctx context.Context,
database *mongo.Database, appID, devAppID string) (*mongo.DeleteResult, error) {
store := widgetcomment.NewStore(ctx, database)
filter := bson.M{"appID": bson.M{"$in": []interface{}{appID, devAppID}}}
deleteResult, err := store.DeleteMany(filter, options.Delete())
if err != nil {
return nil, err
}
return deleteResult, nil
}
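// deleteStoredApp removes the cached copy of the app from Redis.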
func deleteStoredApp(ctx context.Context, cacheClient *redis.Client,
appId string) error {
cacheStore := app.NewCacheStore(ctx, cacheClient)
appStoreKey := app.GetAppStoreKey(appId)
return cacheStore.Delete(appStoreKey)
}
func (client *Client) deleteApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
	/*
		Assuming appID may be either the dev or the prod app id
	*/
appID := mux.Vars(r)["appID"]
workspaceID := mux.Vars(r)["workspaceID"]
err := client.Security.DeleteApp(workspaceID, appID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
//delete tables
workspaceDatabase := client.getDatabase(workspaceID)
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
existingApp, err := getApp(ctx, database, app.GetProdId(appID), options.FindOne())
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
existingDevApp, err := getApp(ctx, database, app.GetDevId(appID), options.FindOne())
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
tableIDs := getTableIDs(existingApp.Tables, existingDevApp.Tables)
if len(tableIDs) > 0 {
err = dropTableDataCollections(ctx, workspaceDatabase, tableIDs)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.InternalServerError),
appleError.AppError, err.Error()))
return
}
}
	session, err := client.MongoClient.StartSession()
	if err != nil {
		setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
		return
	}
	defer session.EndSession(ctx)
response, err := session.WithTransaction(ctx, func(sessionContext mongo.SessionContext) (interface{}, error) {
appStore := app.NewStore(sessionContext, database)
filter := bson.M{"_id": bson.M{"$in": []interface{}{app.GetProdId(appID), app.GetDevId(appID)}}}
deleteResult, err := appStore.DeleteMany(filter, &options.DeleteOptions{})
if err != nil {
return nil, err
}
if deleteResult.DeletedCount == 0 {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidAppErrorCode), appleError.AppError,
appleError.InvalidApp)
}
invitationStore := invitation.NewStore(sessionContext, database)
invitationUpdateBson := bson.M{"$set": bson.M{"valid": false, "updateTime": time.Now().UTC()}}
		//even though the invitation is sent using the prod id, the invitation update is
		//done using both app ids as a precaution
_, err = invitationStore.UpdateMany(bson.M{"entityID": bson.M{"$in": []interface{}{app.GetProdId(appID),
app.GetDevId(appID)}}}, invitationUpdateBson)
if err != nil {
return nil, err
}
_, err = removeAppCollaborators(sessionContext, database, app.GetProdId(appID), app.GetDevId(appID))
if err != nil {
return nil, err
}
//only prod app is added to favorite list
_, err = removefavoriteApp(sessionContext, database, app.GetProdId(appID), workspaceID)
if err != nil {
return nil, err
}
_, err = deleteWidgetData(sessionContext, workspaceDatabase, app.GetProdId(appID), app.GetDevId(appID))
if err != nil {
return nil, err
}
_, err = deleteWidgetComments(sessionContext, workspaceDatabase, app.GetProdId(appID), app.GetDevId(appID))
if err != nil {
return nil, err
}
return map[string]interface{}{"id": appID}, nil
})
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, nil, response, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
//remove prod app from store
deleteStoredApp(ctx, client.Redis, appID)
//socket notify
payloadOptions := socket.PayloadOptions{EventType: socket.DeleteApp, WorkspaceId: workspaceID,
AppId: app.GetDevId(appID), UserId: requestUser.UID}
socket.Notify(ctx, client.Redis, payloadOptions)
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/********************************END DELETE APP*********************************/
/************************************START LIST APP*****************************/
func (appRequest *getAppsRequest) userRelatedAppIds(ctx context.Context, mongoDb *mongo.Database,
userID, workspaceID string) ([]string, []collaborator.Collaborator, error) {
store := collaborator.NewStore(ctx, mongoDb)
filter := bson.M{"userID": userID, "workspaceID": workspaceID}
collaborators, err := store.List(filter, options.Find())
if err != nil {
return nil, nil, err
}
appIDs := []string{}
for _, collaborator := range collaborators {
appIDs = append(appIDs, collaborator.AppID)
}
return appIDs, collaborators, nil
}
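// pagination parses the optional pageNumber and pageSize query parameters and rejects negative values.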
func (appRequest *getAppsRequest) pagination(queryParams url.Values) (app.Pagination, error) {
var pageNumber *int64
var pageSize *int64
pageNumberValue, err := strconv.ParseInt(queryParams.Get("pageNumber"), 10, 64)
if err == nil {
pageNumber = &pageNumberValue
}
if pageNumber != nil && *pageNumber < 0 {
return app.Pagination{}, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidPaginationErrorCode), appleError.AppError, appleError.PageNumberNegative)
}
pageSizeValue, err := strconv.ParseInt(queryParams.Get("pageSize"), 10, 64)
if err == nil {
pageSize = &pageSizeValue
}
if pageSize != nil && *pageSize < 0 {
return app.Pagination{}, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidPaginationErrorCode), appleError.AppError, appleError.PagesizeNegative)
}
page := app.Pagination{
PageNumber: pageNumber,
PageSize: pageSize,
}
return page, nil
}
func (appRequest *getAppsRequest) list(ctx context.Context, mongoDb *mongo.Database,
appIDs []string, queryParams url.Values, workspaceID string,
mapOfCollaborators map[string]collaborator.Collaborator) ([]app.App, error) {
page, err := appRequest.pagination(queryParams)
if err != nil {
return nil, err
}
if page.PageSize != nil && *page.PageSize == 0 {
return []app.App{}, nil
}
projection := appProjections()
filter := bson.M{"_id": bson.M{"$in": appIDs}, "workspaceID": workspaceID}
options := options.FindOptions{
Projection: projection,
}
limit := app.GetPageSize(page)
skip := app.GetSkipCount(page)
if limit != nil {
options.Limit = limit
}
if skip != nil {
options.Skip = skip
}
if len(projection) == 0 {
options.SetProjection(bson.M{"_id": 1})
}
store := app.NewStore(ctx, mongoDb)
apps, err := store.List(filter, &options)
if err != nil {
return nil, err
}
//only published apps if a user is non admin
newApplist := []app.App{}
for _, application := range apps {
if app.IsDevAppID(application.ID) {
continue
}
if string(application.Status) == "" {
continue
}
if mapOfCollaborators[application.ID].RoleID == app.AppAdmin {
newApplist = append(newApplist, application)
continue
}
if application.Status == app.Published {
newApplist = append(newApplist, application)
}
}
return newApplist, nil
}
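// mapOfAppCollaborators indexes the user's collaborator records by app ID.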
func (appRequest *getAppsRequest) mapOfAppCollaborators(collaborators []collaborator.Collaborator) map[string]collaborator.Collaborator {
mapOfCollaborators := map[string]collaborator.Collaborator{}
for _, collaborator := range collaborators {
mapOfCollaborators[collaborator.AppID] = collaborator
}
return mapOfCollaborators
}
func (appRequest *getAppsRequest) createResponse(collaborators map[string]collaborator.Collaborator,
apps []app.App) []GetAppsResponse {
response := []GetAppsResponse{}
for _, app := range apps {
newCollaboratorMetadata := CollaboratorMetadata{
UserID: collaborators[app.ID].UserID,
RoleID: collaborators[app.ID].RoleID,
UpdateTime: collaborators[app.ID].UpdateTime,
CreateTime: collaborators[app.ID].CreateTime,
}
newResponse := GetAppsResponse{
ID: app.ID,
Icon: app.Icon,
DisplayName: app.DisplayName,
WorkspaceID: app.WorkspaceID,
Settings: app.Settings,
Collaborators: map[string]CollaboratorMetadata{
newCollaboratorMetadata.UserID: newCollaboratorMetadata,
},
Status: app.Status,
UpdateTime: app.UpdateTime,
CreateTime: app.CreateTime,
}
response = append(response, newResponse)
}
return response
}
func (client *Client) getApps(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
var appRequest *getAppsRequest
workspaceID := mux.Vars(r)["workspaceID"]
queryParams := r.URL.Query()
err := client.Security.GetApps(workspaceID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
//find all the apps where user is collaborator
userRelatedAppIDs, collaborators, err := appRequest.userRelatedAppIds(ctx, database,
requestUser.UID, workspaceID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
response := []GetAppsResponse{}
if len(collaborators) != 0 {
mapOfCollaborators := appRequest.mapOfAppCollaborators(collaborators)
apps, err := appRequest.list(ctx, database, userRelatedAppIDs, queryParams,
workspaceID, mapOfCollaborators)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
response = appRequest.createResponse(mapOfCollaborators, apps)
}
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, appRequest, response, err)
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/************************************END LIST APP******************************/
/*********************************START PUBLISH APP*****************************/
func (appRequest *PublishAppRequest) validate() error {
// err := validation.ValidateStruct(appRequest,
// validation.Field(&appRequest.ProdEnvironment, validation.NotNil.Error("ProdEnvironment is required")),
// )
// if err != nil {
// return appleError.New(appleError.GenerateErrorCode(appleError.App,
// appleError.RequestValidationErrorCode), appleError.AppError, getValidatorErrorString(err.Error()))
// }
return nil
}
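// createUpdateBson builds the $set document that copies the publishable app state onto the prod app document.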
func (appRequest *PublishAppRequest) createUpdateBson(application *app.App, userID string) bson.M {
updateBson := bson.M{
"displayName": application.DisplayName,
"icon": application.Icon,
"workspaceID": application.WorkspaceID,
"version": application.Version,
"settings": application.Settings,
"roles": application.Roles,
"tables": application.Tables,
"controls": application.Controls,
"pages": application.Pages,
"controlTableMap": application.ControlTableMap,
"status": application.Status,
"integrationMapping": application.IntegrationMapping,
"updaterID": userID,
"updateTime": time.Now().UTC(),
}
return bson.M{"$set": updateBson}
}
func (appRequest *PublishAppRequest) createDevAppUpdateBson(application *app.App, userID string) bson.M {
updateBson := bson.M{
"version": application.Version,
"updaterID": userID,
"updateTime": time.Now().UTC(),
}
return bson.M{"$set": updateBson}
}
func (appRequest *PublishAppRequest) update(ctx context.Context, database *mongo.Database,
appID string, updateBson bson.M) (*mongo.UpdateResult, error) {
projections := bson.M{"_id": 1}
store := app.NewStore(ctx, database)
_, err := store.Get(appID, options.FindOne().SetProjection(projections))
if err != nil {
if err == mongo.ErrNoDocuments {
return nil, appleError.New(appleError.GenerateErrorCode(appleError.App,
appleError.InvalidAppErrorCode), appleError.AppError,
appleError.InvalidApp)
}
return nil, err
}
updateResult, err := store.Update(appID, updateBson)
if err != nil {
return nil, err
}
return updateResult, nil
}
func (appRequest *PublishAppRequest) getProdAndDevApp(ctx context.Context,
database *mongo.Database, appID, devAppId string) (*app.App, *app.App, error) {
appStore := app.NewStore(ctx, database)
filter := bson.M{"_id": bson.M{"$in": []interface{}{appID, devAppId}}}
apps, err := appStore.List(filter, options.Find())
if err != nil {
//error
return nil, nil, err
}
var prodApp, devApp app.App
for _, application := range apps {
if application.ID == appID {
prodApp = application
continue
}
devApp = application
}
return &prodApp, &devApp, nil
}
func (appRequest *PublishAppRequest) getProdAppTableIdButNotInDevApp(
prodTables *map[string]app.Table, devTables *map[string]app.Table) []string {
tableIds := []string{}
	for tableId := range *prodTables {
if _, isTableInDevApp := (*devTables)[tableId]; isTableInDevApp {
continue
}
tableIds = append(tableIds, tableId)
}
return tableIds
}
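// getDeletableProdFields returns, per table, the prod field IDs that no longer exist in the dev app
// and can therefore be removed from the prod data.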
func (appRequest *PublishAppRequest) getDeletableProdFields(
prodTables *map[string]app.Table, devTables *map[string]app.Table) map[string][]string {
deletableTableFields := map[string][]string{}
for tableId := range *prodTables {
fieldIds := []string{}
if _, isTableInDevApp := (*devTables)[tableId]; !isTableInDevApp {
continue
}
devTableFields := (*devTables)[tableId].Fields
prodTableFields := (*prodTables)[tableId].Fields
for fieldId := range prodTableFields {
if _, isFieldInDevApp := devTableFields[fieldId]; isFieldInDevApp {
continue
}
fieldIds = append(fieldIds, fieldId)
}
if len(fieldIds) > 0 {
deletableTableFields[tableId] = fieldIds
}
}
return deletableTableFields
}
func (appRequest *PublishAppRequest) deleteProdDataFields(ctx context.Context,
database *mongo.Database, prodAppId string, deletableTableFields map[string][]string) {
store := tabledata.NewStore(ctx, database)
for tableID, fields := range deletableTableFields {
filter := bson.M{}
updateBson := []interface{}{bson.M{"$unset": fields}}
store.UpdateMany(app.GetInferredTableId(prodAppId, tableID), filter, updateBson)
}
}
func (appRequest *PublishAppRequest) convertProdTableFields(ctx context.Context,
database *mongo.Database, prodAppId string, prodTables *map[string]app.Table, devTables *map[string]app.Table) error {
for tableID := range *prodTables {
if _, isTableInDevApp := (*devTables)[tableID]; !isTableInDevApp {
continue
}
devTableFields := (*devTables)[tableID].Fields
prodTableFields := (*prodTables)[tableID].Fields
err := appRequest.convertFields(ctx, database, tableID, prodAppId, prodTableFields,
devTableFields)
if err != nil {
return err
}
}
return nil
}
func (appRequest *PublishAppRequest) convertFields(ctx context.Context, database *mongo.Database,
tableID, prodAppId string, prodTableFields, devTableFields map[string]app.Field) error {
fieldDataTypeChanges := make(map[string]tabledata.DataFieldConvertion)
for fieldID, field := range prodTableFields {
		// skip prod fields that no longer exist in the dev app; only fields
		// present in both apps can have their data type converted
		if _, isFieldInDevApp := devTableFields[fieldID]; !isFieldInDevApp {
			continue
		}
if len(devTableFields[fieldID].ID) == 0 {
continue
}
if devTableFields[fieldID].DataType != field.DataType {
fieldDataTypeChanges[app.GetInferredTableId(prodAppId, tableID)] = tabledata.DataFieldConvertion{
FieldID: devTableFields[fieldID].ID,
From: field.DataType,
To: devTableFields[fieldID].DataType,
}
if err := tabledata.ConvertFieldData(ctx, database, fieldDataTypeChanges); err != nil {
fmt.Println("Field Conversion Error:", err, "for",
fieldDataTypeChanges[app.GetInferredTableId(prodAppId, tableID)])
return appleError.New(appleError.GenerateErrorCode(appleError.Table,
appleError.FieldTypeConversionErrorCode), appleError.InternalServerError, err.Error())
}
}
}
return nil
}
func (appRequest *PublishAppRequest) createNewProdAppUsingDevApp(
existingProdApp, devApp *app.App) *app.App {
newProdApp := app.App{
ID: existingProdApp.ID,
DisplayName: devApp.DisplayName,
Icon: devApp.Icon,
WorkspaceID: devApp.WorkspaceID,
Version: devApp.Version,
Settings: devApp.Settings,
Roles: devApp.Roles,
Tables: devApp.Tables,
Controls: devApp.Controls,
Pages: devApp.Pages,
ControlTableMap: devApp.ControlTableMap,
Status: app.Published,
IntegrationMapping: devApp.IntegrationMapping,
CreatorID: existingProdApp.CreatorID,
UpdaterID: devApp.UpdaterID,
UpdateTime: devApp.UpdateTime,
CreateTime: existingProdApp.CreateTime,
}
if newProdApp.Controls == nil {
newProdApp.Controls = map[string]app.Control{}
}
if newProdApp.Tables == nil {
newProdApp.Tables = map[string]app.Table{}
}
return &newProdApp
}
func (appRequest *PublishAppRequest) updateEnvironments(devApp *app.App) {
for environmentKey, environmentValue := range appRequest.ProdEnvironment {
devApp.Settings.EnvironmentVariables[environmentKey] = environmentValue
}
}
/*
{
[devIntegrationId] : prodIntegrationId
}
*/
func (appRequest *PublishAppRequest) getDevAndProdIntegrationMapping() map[string]string {
devAndProdMapping := map[string]string{}
for _, integrationMapping := range appRequest.IntegrationsMapping {
devAndProdMapping[integrationMapping.DevIntegrationId] = integrationMapping.ProdIntegrationId
}
return devAndProdMapping
}
func (appRequest *PublishAppRequest) listIntegrations(ctx context.Context,
database *mongo.Database) ([]integration.Integration, error) {
integrationIds := []string{}
for _, integrationMetadata := range appRequest.IntegrationsMapping {
integrationIds = append(integrationIds, integrationMetadata.DevIntegrationId, integrationMetadata.ProdIntegrationId)
}
if len(integrationIds) == 0 {
return []integration.Integration{}, nil
}
store := integration.NewStore(ctx, database)
filter := bson.M{"_id": bson.M{"$in": integrationIds}}
projections := bson.M{"displayName": 1, "type": 1}
integrations, err := store.List(filter, options.Find().SetProjection(projections))
if err != nil {
return nil, err
}
return integrations, nil
}
func (appRequest *PublishAppRequest) updateIntegrationMapping(devApp *app.App,
integrations []integration.Integration) {
mapOfIntegration := map[string]integration.Integration{}
for _, integrationData := range integrations {
mapOfIntegration[integrationData.ID] = integrationData
}
newIntegrationMappings := []app.IntegrationMapping{}
for _, integrationMap := range appRequest.IntegrationsMapping {
newIntegrationMapping := app.IntegrationMapping{}
if _, integrationExists := mapOfIntegration[integrationMap.DevIntegrationId]; !integrationExists {
continue
}
if _, integrationExists := mapOfIntegration[integrationMap.ProdIntegrationId]; !integrationExists {
continue
}
newDevIntegration := app.Integration{
ID: mapOfIntegration[integrationMap.DevIntegrationId].ID,
DisplayName: mapOfIntegration[integrationMap.DevIntegrationId].DisplayName,
Type: string(mapOfIntegration[integrationMap.DevIntegrationId].Type),
}
newProdIntegration := app.Integration{
ID: mapOfIntegration[integrationMap.ProdIntegrationId].ID,
DisplayName: mapOfIntegration[integrationMap.ProdIntegrationId].DisplayName,
Type: string(mapOfIntegration[integrationMap.ProdIntegrationId].Type),
}
newIntegrationMapping.DevIntegration = &newDevIntegration
newIntegrationMapping.ProdIntegration = &newProdIntegration
newIntegrationMappings = append(newIntegrationMappings, newIntegrationMapping)
}
devApp.IntegrationMapping = newIntegrationMappings
}
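// storeApp caches the published prod app in Redis under its app store key.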
func storeApp(ctx context.Context, cacheClient *redis.Client,
prodApp *app.App) error {
cacheStore := app.NewCacheStore(ctx, cacheClient)
appStoreKey := app.GetAppStoreKey(prodApp.ID)
	storableApp, err := prodApp.GetStorableAbleApp()
	if err != nil {
		return err
	}
	fmt.Printf("storing app %s\n", string(storableApp))
return cacheStore.Set(appStoreKey, storableApp, app.AppExpiryTime)
}
func (appRequest *PublishAppRequest) logControlFlow(logEntry *logrus.Entry,
requestID, userID string, prodApp *app.App) {
for _, control := range prodApp.Controls {
// logging
nodezapLogTag := nodezapLog.Tags{WorkspaceID: prodApp.WorkspaceID,
AppID: prodApp.ID, UserID: userID, ControlID: control.ID,
RequestID: requestID}
//log post
nodezapLogTag.FlowID = string(app.PostID)
nodezapLogTag.FlowVersion = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Version
nodezapLogTag.Method = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Method
nodezapLog.ControlFlow(logEntry, nodezapLogTag, prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID])
//log put
nodezapLogTag.FlowID = string(app.PutID)
nodezapLogTag.FlowVersion = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Version
nodezapLogTag.Method = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Method
nodezapLog.ControlFlow(logEntry, nodezapLogTag, prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID])
//log list
nodezapLogTag.FlowID = string(app.ListID)
nodezapLogTag.FlowVersion = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Version
nodezapLogTag.Method = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Method
nodezapLog.ControlFlow(logEntry, nodezapLogTag, prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID])
//log delete
nodezapLogTag.FlowID = string(app.DeleteID)
nodezapLogTag.FlowVersion = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Version
nodezapLogTag.Method = prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID].Method
nodezapLog.ControlFlow(logEntry, nodezapLogTag, prodApp.Controls[nodezapLogTag.ControlID].Flows[nodezapLogTag.FlowID])
}
}
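// copyStaticWidgetData replaces the prod app's static widget data with a copy of the dev app's widget data.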
func (appRequest *PublishAppRequest) copyStaticWidgetData(ctx context.Context,
workspaceDatabase *mongo.Database, devAppId, prodAppId string) error {
store := widgetdata.NewStore(ctx, workspaceDatabase)
//get dev static widget data
filter := bson.M{"appID": devAppId}
devWidgetDataList, err := store.List(filter, options.Find())
if err != nil {
return err
}
//remove prod static widget data
deleteProdStaticWidgetDataFilter := bson.M{"appID": prodAppId}
_, err = store.DeleteMany(deleteProdStaticWidgetDataFilter, options.Delete())
if err != nil {
return err
}
//add static widget data to prod
prodWidgetDataList := []interface{}{}
for _, devWidgetData := range devWidgetDataList {
newProdWidgetData := widgetdata.WidgetData{
ID: primitive.NewObjectID().Hex(),
AppID: prodAppId,
PageID: devWidgetData.PageID,
WidgetID: devWidgetData.WidgetID,
Data: devWidgetData.Data,
CreatorID: devWidgetData.CreatorID,
UpdaterID: devWidgetData.UpdaterID,
CreateTime: devWidgetData.CreateTime,
UpdateTime: devWidgetData.UpdateTime,
}
prodWidgetDataList = append(prodWidgetDataList, newProdWidgetData)
}
if len(prodWidgetDataList) == 0 {
return nil
}
_, err = store.CreateMany(prodWidgetDataList)
if err != nil {
return err
}
return nil
}
func (client *Client) publishApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
requestBody, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.InvalidPayloadErrorCode), client.getRequestID())
return
}
var appRequest *PublishAppRequest
err = json.Unmarshal(requestBody, &appRequest)
if err != nil {
setBadRequestErrorResponse(w, client.LogEntry, appleError.GenerateErrorCode(appleError.App,
appleError.UnMarshallingErrorCode), client.getRequestID())
return
}
//dev app id
appID := mux.Vars(r)["appID"]
workspaceID := mux.Vars(r)["workspaceID"]
err = client.Security.UpdateApp(workspaceID, appID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
err = appRequest.validate()
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
workspaceDatabase := client.getDatabase(workspaceID)
var publishableApp *app.App
var deletableTableIds []string
	session, err := client.MongoClient.StartSession()
	if err != nil {
		setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
		return
	}
	defer session.EndSession(ctx)
response, err := session.WithTransaction(ctx, func(sessionContext mongo.SessionContext) (interface{}, error) {
prodApp, devApp, err := appRequest.getProdAndDevApp(sessionContext, database,
app.GetProdId(appID), appID)
if err != nil {
return nil, err
}
deletableTableIds = appRequest.getProdAppTableIdButNotInDevApp(&prodApp.Tables, &devApp.Tables)
deletableFields := appRequest.getDeletableProdFields(&prodApp.Tables, &devApp.Tables)
appRequest.deleteProdDataFields(ctx, workspaceDatabase, prodApp.ID, deletableFields)
err = appRequest.convertProdTableFields(ctx, workspaceDatabase, prodApp.ID,
&prodApp.Tables, &devApp.Tables)
if err != nil {
return nil, err
}
err = appRequest.copyStaticWidgetData(sessionContext, workspaceDatabase,
devApp.ID, prodApp.ID)
if err != nil {
return nil, err
}
integrationsMapping := appRequest.getDevAndProdIntegrationMapping()
appRequest.updateEnvironments(devApp)
integrations, err := appRequest.listIntegrations(sessionContext, database)
if err != nil {
return nil, err
}
devApp.UpdateControlIntegrations(integrationsMapping)
appRequest.updateIntegrationMapping(devApp, integrations)
/*
update version
*/
devApp.Version = devApp.Version + 1
publishableApp = appRequest.createNewProdAppUsingDevApp(prodApp, devApp)
updateBson := appRequest.createUpdateBson(publishableApp, requestUser.UID)
updateDevAppBson := appRequest.createDevAppUpdateBson(devApp, requestUser.UID)
_, err = appRequest.update(ctx, database, prodApp.ID, updateBson)
if err != nil {
return nil, err
}
//update devapp
_, err = appRequest.update(ctx, database, devApp.ID, updateDevAppBson)
if err != nil {
return nil, err
}
return publishableApp, nil
})
requestId := client.getRequestID()
nodezapLogTags := nodezapLog.Tags{UserID: requestUser.UID, WorkspaceID: workspaceID,
RequestID: requestId}
if err == nil {
if len(deletableTableIds) > 0 {
dropTableDataCollections(ctx, workspaceDatabase, deletableTableIds)
}
//store app
storeApp(ctx, client.Redis, publishableApp)
//log controls
appRequest.logControlFlow(client.LogEntry, requestId, requestUser.UID, publishableApp)
//log prod app
nodezapLog.AppVersions(client.LogEntry, nodezapLogTags, *publishableApp)
}
//logging
nodezapLog.Debug(client.LogEntry, nodezapLogTags, appRequest, appRequest, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, requestId, err)
return
}
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, requestId, nil, client.LogEntry))
}
/*********************************END PUBLISH APP*******************************/
/*********************************START PUBLISH CONFIGURATION*******************/
func (appRequest *PublishAppConfigurationResponse) getProdAndDevApp(ctx context.Context,
database *mongo.Database, appID, devAppId string) (*app.App, *app.App, error) {
appStore := app.NewStore(ctx, database)
filter := bson.M{"_id": bson.M{"$in": []interface{}{appID, devAppId}}}
apps, err := appStore.List(filter, options.Find())
if err != nil {
//error
return nil, nil, err
}
var prodApp, devApp app.App
for _, application := range apps {
if application.ID == appID {
prodApp = application
continue
}
devApp = application
}
return &prodApp, &devApp, nil
}
func (appRequest *PublishAppConfigurationResponse) listIntegrations(ctx context.Context,
database *mongo.Database, integrationMapping []app.IntegrationMapping,
newIntegrationIds []string) (map[string]app.Integration, error) {
mapOfIntegrations := map[string]app.Integration{}
integrationIds := []string{}
for _, integrationData := range integrationMapping {
integrationIds = append(integrationIds, integrationData.DevIntegration.ID,
integrationData.ProdIntegration.ID)
}
integrationIds = append(integrationIds, newIntegrationIds...)
if len(integrationIds) == 0 {
return mapOfIntegrations, nil
}
store := integration.NewStore(ctx, database)
filter := bson.M{"_id": bson.M{"$in": integrationIds}}
projections := bson.M{"displayName": 1, "type": 1}
integrations, err := store.List(filter, options.Find().SetProjection(projections))
if err != nil {
return nil, err
}
for _, integrationData := range integrations {
newIntegration := app.Integration{
ID: integrationData.ID,
DisplayName: integrationData.DisplayName,
Type: string(integrationData.Type),
}
mapOfIntegrations[integrationData.ID] = newIntegration
}
return mapOfIntegrations, nil
}
/*[
{
//new
dev : new1
prod :
},
{
//changed
dev : new2
prod : new2
},
{
//removed
dev :
prod : ""
},
{
//new
dev : new3
prod : "" //prod resource was deleted
}
]
**/
func (appRequest *PublishAppConfigurationResponse) newIntegrationMapping(
mapOfIntegrations map[string]app.Integration, integrationMapping []app.IntegrationMapping,
associatedIntegrationIds []string) []app.IntegrationMapping {
newIntegrationMappings := []app.IntegrationMapping{}
//to map
devAssociatedIntegrationIds := map[string]bool{}
for _, integrationId := range associatedIntegrationIds {
devAssociatedIntegrationIds[integrationId] = true
}
for _, devAndProdIntegration := range integrationMapping {
_, devIntegrationExists := mapOfIntegrations[devAndProdIntegration.DevIntegration.ID]
_, associatedWithDevApp := devAssociatedIntegrationIds[devAndProdIntegration.DevIntegration.ID]
_, prodIntegrationExists := mapOfIntegrations[devAndProdIntegration.ProdIntegration.ID]
newIntegrationMapping := app.IntegrationMapping{}
if devIntegrationExists && associatedWithDevApp {
newAppIntegration := app.Integration{
ID: mapOfIntegrations[devAndProdIntegration.DevIntegration.ID].ID,
DisplayName: mapOfIntegrations[devAndProdIntegration.DevIntegration.ID].DisplayName,
Type: mapOfIntegrations[devAndProdIntegration.DevIntegration.ID].Type,
}
newIntegrationMapping.DevIntegration = &newAppIntegration
}
if prodIntegrationExists {
newAppIntegration := app.Integration{
ID: mapOfIntegrations[devAndProdIntegration.ProdIntegration.ID].ID,
DisplayName: mapOfIntegrations[devAndProdIntegration.ProdIntegration.ID].DisplayName,
Type: mapOfIntegrations[devAndProdIntegration.ProdIntegration.ID].Type,
}
newIntegrationMapping.ProdIntegration = &newAppIntegration
}
if devIntegrationExists || prodIntegrationExists {
newIntegrationMappings = append(newIntegrationMappings, newIntegrationMapping)
}
}
devMappedIntegrationId := map[string]bool{}
for _, devAndProdIntegration := range integrationMapping {
devMappedIntegrationId[devAndProdIntegration.DevIntegration.ID] = true
}
for integrationId := range devAssociatedIntegrationIds {
if _, integrationExists := devMappedIntegrationId[integrationId]; integrationExists {
delete(devAssociatedIntegrationIds, integrationId)
}
}
for integrationId := range devAssociatedIntegrationIds {
if _, integrationExists := mapOfIntegrations[integrationId]; !integrationExists {
continue
}
newIntegrationMapping := app.IntegrationMapping{}
newAppIntegration := app.Integration{
ID: mapOfIntegrations[integrationId].ID,
DisplayName: mapOfIntegrations[integrationId].DisplayName,
Type: mapOfIntegrations[integrationId].Type,
}
newIntegrationMapping.DevIntegration = &newAppIntegration
newIntegrationMappings = append(newIntegrationMappings, newIntegrationMapping)
}
return newIntegrationMappings
}
func (appRequest *PublishAppConfigurationResponse) getResponse(mapOfIntegrations map[string]app.Integration,
integrationMapping []app.IntegrationMapping, devEnvironment, prodEnvironment map[string]string) PublishAppConfigurationResponse {
newResponse := PublishAppConfigurationResponse{
DevEnvironment: devEnvironment,
ProdEnvironment: prodEnvironment,
IntegrationsMapping: integrationMapping,
}
if integrationMapping == nil {
newResponse.IntegrationsMapping = []app.IntegrationMapping{}
}
return newResponse
}
func (client *Client) publishAppConfiguration(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
appRequest := PublishAppConfigurationResponse{}
//dev app id
appID := mux.Vars(r)["appID"]
workspaceID := mux.Vars(r)["workspaceID"]
err := client.Security.UpdateApp(workspaceID, appID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
	session, err := client.MongoClient.StartSession()
	if err != nil {
		setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
		return
	}
	defer session.EndSession(ctx)
response, err := session.WithTransaction(ctx, func(sessionContext mongo.SessionContext) (interface{}, error) {
prodApp, devApp, err := appRequest.getProdAndDevApp(sessionContext, database,
app.GetProdId(appID), appID)
if err != nil {
return nil, err
}
listOfAssociatedIntegrationIDs, _ := app.GetAssociatedIntegrations(&devApp.Controls)
integrations, err := appRequest.listIntegrations(sessionContext, database,
prodApp.IntegrationMapping, listOfAssociatedIntegrationIDs)
if err != nil {
return nil, err
}
newIntegrationMapping := appRequest.newIntegrationMapping(integrations,
prodApp.IntegrationMapping, listOfAssociatedIntegrationIDs)
newResponse := appRequest.getResponse(integrations, newIntegrationMapping,
devApp.Settings.EnvironmentVariables, prodApp.Settings.EnvironmentVariables)
return newResponse, nil
})
//logging
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
RequestID: client.getRequestID()}, appRequest, appRequest, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/*********************************END PUBLISH CONFIGURATION********************/
/*********************************START UN PUBLISH*****************************/
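// getUnPublishAppUpdateBson builds the $set document that updates an app's publish status, updater and update time.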
func getUnPublishAppUpdateBson(appStatus app.AppStatus, userID string) bson.M {
updateBson := bson.M{
"status": appStatus,
"updaterID": userID,
"updateTime": time.Now().UTC(),
}
return bson.M{"$set": updateBson}
}
func (client *Client) unPublishApp(w http.ResponseWriter, r *http.Request) {
requestUser := r.Context().Value(authUser).(user.User)
ctx, cancel := newRequestContext(r)
defer cancel()
	/*
		Assuming appID may be either the prod or the dev app id
	*/
appID := mux.Vars(r)["appID"]
workspaceID := mux.Vars(r)["workspaceID"]
err := client.Security.UnPublishApp(workspaceID, appID, requestUser.UID)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), appleError.New(
appleError.GenerateErrorCode(appleError.App, appleError.ForbiddenErrorCode),
appleError.ForbiddenError, err.Error()))
return
}
database := client.getDatabase(os.Getenv("MONGO_DB_NAME"))
var publishableApp *app.App
	session, err := client.MongoClient.StartSession()
	if err != nil {
		setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
		return
	}
	defer session.EndSession(ctx)
response, err := session.WithTransaction(ctx, func(sessionContext mongo.SessionContext) (interface{}, error) {
appStore := app.NewStore(sessionContext, database)
publishableApp, err = getApp(sessionContext, database, app.GetProdId(appID), options.FindOne())
if err != nil {
return nil, err
}
publishableApp.Status = app.Unpublished
updateBson := getUnPublishAppUpdateBson(publishableApp.Status, requestUser.UID)
_, err = appStore.Update(publishableApp.ID, updateBson)
if err != nil {
return nil, err
}
return publishableApp, nil
})
if err == nil {
//store app
storeApp(ctx, client.Redis, publishableApp)
}
nodezapLog.Debug(client.LogEntry, nodezapLog.Tags{UserID: requestUser.UID,
AppID: appID, WorkspaceID: workspaceID, RequestID: client.getRequestID()},
nil, response, err)
if err != nil {
setAppleErrorResponse(w, client.LogEntry, client.getRequestID(), err)
return
}
json.NewEncoder(w).Encode(*NewResponse(SUCCESS, response, client.getRequestID(), nil, client.LogEntry))
}
/*********************************END UN PUBLISH*****************************/
| ["\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\"", "\"MONGO_DB_NAME\""] | [] | ["MONGO_DB_NAME"] | [] | ["MONGO_DB_NAME"] | go | 1 | 0 |
venv/Lib/site-packages/joblib/_multiprocessing_helpers.py | """Helper module to factorize the conditional multiprocessing import logic
We use a distinct module to simplify import statements and avoid introducing
circular dependencies (for instance for the assert_spawning name).
"""
import os
import sys
import warnings
# Obtain the configuration from the environment: multiprocessing is enabled (1)
# by default and disabled (set to None) when the variable is 0. The int() cast
# will fail loudly if a non-integer value is set.
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if mp:
try:
import multiprocessing as mp
except ImportError:
mp = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if mp is not None:
try:
# Use the spawn context
if sys.version_info < (3, 3):
Semaphore = mp.Semaphore
else:
            # Using mp.Semaphore has the side effect of setting the default
            # backend for multiprocessing. To avoid that, we use the 'spawn'
            # context which is available on all supported platforms.
ctx = mp.get_context('spawn')
Semaphore = ctx.Semaphore
_sem = Semaphore()
del _sem # cleanup
except (ImportError, OSError) as e:
mp = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
# 3rd stage: backward compat for the assert_spawning helper
if mp is not None:
try:
# Python 3.4+
from multiprocessing.context import assert_spawning
except ImportError:
from multiprocessing.forking import assert_spawning
else:
assert_spawning = None
| [] | [] | ["JOBLIB_MULTIPROCESSING"] | [] | ["JOBLIB_MULTIPROCESSING"] | python | 1 | 0 |
tvm/dmlc-core/tracker/dmlc_tracker/launcher.py | #!/usr/bin/env python
# pylint: disable=invalid-name
"""The container launcher script that launches DMLC with the right env variable."""
import glob
import sys
import os
import subprocess
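# unzip_archives extracts each existing archive in ar_list (zip or tar) into the working directory.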
def unzip_archives(ar_list, env):
for fname in ar_list:
if not os.path.exists(fname):
continue
if fname.endswith('.zip'):
subprocess.call(args=['unzip', fname], env=env)
elif fname.find('.tar') != -1:
subprocess.call(args=['tar', '-xf', fname], env=env)
def main():
"""Main moduke of the launcher."""
if len(sys.argv) < 2:
print('Usage: launcher.py your command')
sys.exit(0)
hadoop_home = os.getenv('HADOOP_HOME')
hdfs_home = os.getenv('HADOOP_HDFS_HOME')
java_home = os.getenv('JAVA_HOME')
hadoop_home = os.getenv('HADOOP_PREFIX') if hadoop_home is None else hadoop_home
cluster = os.getenv('DMLC_JOB_CLUSTER')
assert cluster is not None, 'need to have DMLC_JOB_CLUSTER'
env = os.environ.copy()
library_path = ['./']
class_path = []
if cluster == 'yarn':
assert hadoop_home is not None, 'need to set HADOOP_HOME'
assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'
assert java_home is not None, 'need to set JAVA_HOME'
if cluster == 'sge':
num_worker = int(env['DMLC_NUM_WORKER'])
task_id = int(env['DMLC_TASK_ID'])
if task_id < num_worker:
env['DMLC_ROLE'] = 'worker'
else:
env['DMLC_ROLE'] = 'server'
if hadoop_home:
library_path.append('%s/lib/native' % hdfs_home)
library_path.append('%s/lib' % hdfs_home)
(classpath, _) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,
stdout=subprocess.PIPE, shell=True,
env=os.environ).communicate()
for f in classpath.split(':'):
class_path += glob.glob(f)
if java_home:
library_path.append('%s/jre/lib/amd64/server' % java_home)
env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(class_path))
# setup hdfs options
if 'DMLC_HDFS_OPTS' in env:
env['LIBHDFS_OPTS'] = env['DMLC_HDFS_OPTS']
elif 'LIBHDFS_OPTS' not in env:
env['LIBHDFS_OPTS'] = '--Xmx128m'
LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''
env['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH + ':' + ':'.join(library_path)
# unzip the archives.
if 'DMLC_JOB_ARCHIVES' in env:
unzip_archives(env['DMLC_JOB_ARCHIVES'].split(':'), env)
ret = subprocess.call(args=sys.argv[1:], env=env)
sys.exit(ret)
if __name__ == '__main__':
main()
| [] | [] | ["HADOOP_PREFIX", "JAVA_HOME", "HADOOP_HDFS_HOME", "HADOOP_HOME", "DMLC_JOB_CLUSTER"] | [] | ["HADOOP_PREFIX", "JAVA_HOME", "HADOOP_HDFS_HOME", "HADOOP_HOME", "DMLC_JOB_CLUSTER"] | python | 5 | 0 |
libcloud/common/base.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from typing import Type
from typing import Optional
import json
import os
import ssl
import socket
import copy
import binascii
import time
from libcloud.utils.py3 import ET
import requests
import libcloud
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import urlencode
from libcloud.utils.misc import lowercase_keys, retry
from libcloud.common.exceptions import exception_from_message
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.http import LibcloudConnection, HttpLibResponseProxy
__all__ = [
'RETRY_FAILED_HTTP_REQUESTS',
'BaseDriver',
'Connection',
'PollingConnection',
'ConnectionKey',
'ConnectionUserAndKey',
'CertificateConnection',
'Response',
'HTTPResponse',
'JsonResponse',
'XmlResponse',
'RawResponse'
]
# Module level variable indicates if the failed HTTP requests should be retried
RETRY_FAILED_HTTP_REQUESTS = False
class LazyObject(object):
"""An object that doesn't get initialized until accessed."""
@classmethod
def _proxy(cls, *lazy_init_args, **lazy_init_kwargs):
class Proxy(cls, object):
_lazy_obj = None
def __init__(self):
# Must override the lazy_cls __init__
pass
def __getattribute__(self, attr):
lazy_obj = object.__getattribute__(self, '_get_lazy_obj')()
return getattr(lazy_obj, attr)
def __setattr__(self, attr, value):
lazy_obj = object.__getattribute__(self, '_get_lazy_obj')()
setattr(lazy_obj, attr, value)
def _get_lazy_obj(self):
lazy_obj = object.__getattribute__(self, '_lazy_obj')
if lazy_obj is None:
lazy_obj = cls(*lazy_init_args, **lazy_init_kwargs)
object.__setattr__(self, '_lazy_obj', lazy_obj)
return lazy_obj
return Proxy()
@classmethod
def lazy(cls, *lazy_init_args, **lazy_init_kwargs):
"""Create a lazily instantiated instance of the subclass, cls."""
return cls._proxy(*lazy_init_args, **lazy_init_kwargs)
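# Illustrative sketch (not part of libcloud): LazyObject.lazy defers construction until
# the first attribute access. ExpensiveClient is a hypothetical subclass used only here.
def _example_lazy_object():
    class ExpensiveClient(LazyObject):
        def __init__(self, endpoint):
            self.endpoint = endpoint               # imagine slow auth/network work here
    client = ExpensiveClient.lazy('https://example.com')  # nothing is constructed yet
    return client.endpoint                         # first access triggers __init__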
class HTTPResponse(httplib.HTTPResponse):
# On python 2.6 some calls can hang because HEAD isn't quite properly
# supported.
# In particular this happens on S3 when calls are made to get_object to
# objects that don't exist.
# This applies the behaviour from 2.7, fixing the hangs.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
return httplib.HTTPResponse.read(self, amt)
class Response(object):
"""
A base Response class to derive from.
"""
# Response status code
status = httplib.OK # type: int
# Response headers
headers = {} # type: dict
# Raw response body
body = None
# Parsed response body
object = None
error = None # Reason returned by the server.
connection = None # Parent connection class
parse_zero_length_body = False
def __init__(self, response, connection):
"""
:param response: HTTP response object. (optional)
:type response: :class:`httplib.HTTPResponse`
:param connection: Parent connection object.
:type connection: :class:`.Connection`
"""
self.connection = connection
# http.client In Python 3 doesn't automatically lowercase the header
# names
self.headers = lowercase_keys(dict(response.headers))
self.error = response.reason
self.status = response.status_code
self.request = response.request
self.iter_content = response.iter_content
self.body = response.text.strip() \
if response.text is not None and hasattr(response.text, 'strip') \
else ''
if not self.success():
raise exception_from_message(code=self.status,
message=self.parse_error(),
headers=self.headers)
self.object = self.parse_body()
def parse_body(self):
"""
Parse response body.
Override in a provider's subclass.
:return: Parsed body.
:rtype: ``str``
"""
return self.body if self.body is not None else ''
def parse_error(self):
"""
Parse the error messages.
Override in a provider's subclass.
:return: Parsed error.
:rtype: ``str``
"""
return self.body
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
:rtype: ``bool``
:return: ``True`` or ``False``
"""
# pylint: disable=E1101
return self.status in [requests.codes.ok, requests.codes.created,
httplib.OK, httplib.CREATED, httplib.ACCEPTED]
class JsonResponse(Response):
"""
A Base JSON Response class to derive from.
"""
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
body = json.loads(self.body)
except Exception:
raise MalformedResponseError(
'Failed to parse JSON',
body=self.body,
driver=self.connection.driver)
return body
parse_error = parse_body
class XmlResponse(Response):
"""
A Base XML Response class to derive from.
"""
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
try:
body = ET.XML(self.body)
except ValueError:
# lxml wants a bytes and tests are basically hard-coded to str
body = ET.XML(self.body.encode('utf-8'))
except Exception:
raise MalformedResponseError('Failed to parse XML',
body=self.body,
driver=self.connection.driver)
return body
parse_error = parse_body
class RawResponse(Response):
def __init__(self, connection, response=None):
"""
:param connection: Parent connection object.
:type connection: :class:`.Connection`
"""
self._status = None
self._response = None
self._headers = {}
self._error = None
self._reason = None
self.connection = connection
if response is not None:
self.headers = lowercase_keys(dict(response.headers))
self.error = response.reason
self.status = response.status_code
self.request = response.request
self.iter_content = response.iter_content
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
:rtype: ``bool``
:return: ``True`` or ``False``
"""
# pylint: disable=E1101
return self.status in [requests.codes.ok, requests.codes.created,
httplib.OK, httplib.CREATED, httplib.ACCEPTED]
@property
def response(self):
if not self._response:
response = self.connection.connection.getresponse()
self._response = HttpLibResponseProxy(response)
if not self.success():
self.parse_error()
return self._response
@property
def body(self):
# Note: We use property to avoid saving whole response body into RAM
# See https://github.com/apache/libcloud/pull/1132 for details
return self.response.body
@property
def reason(self):
if not self._reason:
self._reason = self.response.reason
return self._reason
class Connection(object):
"""
A Base Connection class to derive from.
"""
conn_class = LibcloudConnection
responseCls = Response
rawResponseCls = RawResponse
connection = None
host = '127.0.0.1' # type: str
port = 443
timeout = None # type: Optional[Union[int, float]]
secure = 1
driver = None # type: Type[BaseDriver]
action = None
cache_busting = False
backoff = None
retry_delay = None
allow_insecure = True
def __init__(self, secure=True, host=None, port=None, url=None,
timeout=None, proxy_url=None, retry_delay=None, backoff=None):
self.secure = secure and 1 or 0
self.ua = []
self.context = {}
if not self.allow_insecure and not secure:
# TODO: We should eventually switch to whitelist instead of
# blacklist approach
raise ValueError('Non https connections are not allowed (use '
'secure=True)')
self.request_path = ''
if host:
self.host = host
if port is not None:
self.port = port
else:
if self.secure == 1:
self.port = 443
else:
self.port = 80
if url:
(self.host, self.port, self.secure,
self.request_path) = self._tuple_from_url(url)
self.timeout = timeout or self.timeout
self.retry_delay = retry_delay
self.backoff = backoff
self.proxy_url = proxy_url
def set_http_proxy(self, proxy_url):
"""
Set a HTTP / HTTPS proxy which will be used with this connection.
:param proxy_url: Proxy URL (e.g. http://<hostname>:<port> without
authentication and
<scheme>://<username>:<password>@<hostname>:<port>
for basic auth authentication information.
:type proxy_url: ``str``
"""
self.proxy_url = proxy_url
# NOTE: Because of the way connection instantiation works, we need to call
# self.connection.set_http_proxy() here. Just setting "self.proxy_url"
# won't work.
self.connection.set_http_proxy(proxy_url=proxy_url)
def set_context(self, context):
if not isinstance(context, dict):
raise TypeError('context needs to be a dictionary')
self.context = context
def reset_context(self):
self.context = {}
def _tuple_from_url(self, url):
secure = 1
port = None
(scheme, netloc, request_path, param,
query, fragment) = urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url))
if scheme == "http":
secure = 0
if ":" in netloc:
netloc, port = netloc.rsplit(":")
port = int(port)
if not port:
if scheme == "http":
port = 80
else:
port = 443
host = netloc
port = int(port)
return (host, port, secure, request_path)
def connect(self, host=None, port=None, base_url=None, **kwargs):
"""
Establish a connection with the API server.
:type host: ``str``
:param host: Optional host to override our default
:type port: ``int``
:param port: Optional port to override our default
:returns: A connection
"""
# prefer the attribute base_url if its set or sent
connection = None
secure = self.secure
if getattr(self, 'base_url', None) and base_url is None:
(host, port,
secure, request_path) = \
self._tuple_from_url(getattr(self, 'base_url'))
elif base_url is not None:
(host, port,
secure, request_path) = self._tuple_from_url(base_url)
else:
host = host or self.host
port = port or self.port
# Make sure port is an int
port = int(port)
if not hasattr(kwargs, 'host'):
kwargs.update({'host': host})
if not hasattr(kwargs, 'port'):
kwargs.update({'port': port})
if not hasattr(kwargs, 'secure'):
kwargs.update({'secure': self.secure})
if not hasattr(kwargs, 'key_file') and hasattr(self, 'key_file'):
kwargs.update({'key_file': getattr(self, 'key_file')})
if not hasattr(kwargs, 'cert_file') and hasattr(self, 'cert_file'):
kwargs.update({'cert_file': getattr(self, 'cert_file')})
if self.timeout:
kwargs.update({'timeout': self.timeout})
if self.proxy_url:
kwargs.update({'proxy_url': self.proxy_url})
connection = self.conn_class(**kwargs)
# You can uncomment this line, if you set up a reverse proxy server
# which proxies to your endpoint, and lets you easily capture
# connections in cleartext when you set up the proxy to do SSL
# for you
# connection = self.conn_class("127.0.0.1", 8080)
self.connection = connection
def _user_agent(self):
user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua])
if self.driver:
user_agent = 'libcloud/%s (%s) %s' % (
libcloud.__version__,
self.driver.name, user_agent_suffix)
else:
user_agent = 'libcloud/%s %s' % (
libcloud.__version__, user_agent_suffix)
return user_agent
def user_agent_append(self, token):
"""
Append a token to a user agent string.
Users of the library should call this to uniquely identify their
requests to a provider.
:type token: ``str``
:param token: Token to add to the user agent.
"""
self.ua.append(token)
def request(self, action, params=None, data=None, headers=None,
method='GET', raw=False, stream=False):
"""
Request a given `action`.
Basically a wrapper around the connection
object's `request` that does some helpful pre-processing.
:type action: ``str``
:param action: A path. This can include arguments. If included,
any extra parameters are appended to the existing ones.
:type params: ``dict``
:param params: Optional mapping of additional parameters to send. If
None, leave as an empty ``dict``.
:type data: ``unicode``
:param data: A body of data to send with the request.
:type headers: ``dict``
:param headers: Extra headers to add to the request
None, leave as an empty ``dict``.
:type method: ``str``
:param method: An HTTP method such as "GET" or "POST".
:type raw: ``bool``
:param raw: True to perform a "raw" request aka only send the headers
and use the rawResponseCls class. This is used with
storage API when uploading a file.
:type stream: ``bool``
:param stream: True to return an iterator in Response.iter_content
and allow streaming of the response data
(for downloading large files)
:return: An :class:`Response` instance.
:rtype: :class:`Response` instance
"""
if params is None:
params = {}
else:
params = copy.copy(params)
if headers is None:
headers = {}
else:
headers = copy.copy(headers)
retry_enabled = os.environ.get('LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS',
False) or RETRY_FAILED_HTTP_REQUESTS
action = self.morph_action_hook(action)
self.action = action
self.method = method
self.data = data
# Extend default parameters
params = self.add_default_params(params)
# Add cache busting parameters (if enabled)
if self.cache_busting and method == 'GET':
params = self._add_cache_busting_to_params(params=params)
# Extend default headers
headers = self.add_default_headers(headers)
# We always send a user-agent header
headers.update({'User-Agent': self._user_agent()})
# Indicate that we support gzip and deflate compression
headers.update({'Accept-Encoding': 'gzip,deflate'})
port = int(self.port)
if port not in (80, 443):
headers.update({'Host': "%s:%d" % (self.host, port)})
else:
headers.update({'Host': self.host})
if data:
data = self.encode_data(data)
params, headers = self.pre_connect_hook(params, headers)
if params:
if '?' in action:
url = '&'.join((action, urlencode(params, doseq=True)))
else:
url = '?'.join((action, urlencode(params, doseq=True)))
else:
url = action
# IF connection has not yet been established
if self.connection is None:
self.connect()
try:
# @TODO: Should we just pass File object as body to request method
# instead of dealing with splitting and sending the file ourselves?
if raw:
self.connection.prepared_request(
method=method,
url=url,
body=data,
headers=headers,
raw=raw,
stream=stream)
else:
if retry_enabled:
retry_request = retry(timeout=self.timeout,
retry_delay=self.retry_delay,
backoff=self.backoff)
retry_request(self.connection.request)(method=method,
url=url,
body=data,
headers=headers,
stream=stream)
else:
self.connection.request(method=method, url=url, body=data,
headers=headers, stream=stream)
except socket.gaierror as e:
message = str(e)
errno = getattr(e, 'errno', None)
if errno == -5:
# Throw a more-friendly exception on "no address associated
# with hostname" error. This error could simpli indicate that
# "host" Connection class attribute is set to an incorrect
# value
class_name = self.__class__.__name__
msg = ('%s. Perhaps "host" Connection class attribute '
'(%s.connection) is set to an invalid, non-hostname '
'value (%s)?' %
(message, class_name, self.host))
raise socket.gaierror(msg)
self.reset_context()
raise e
except ssl.SSLError as e:
self.reset_context()
raise ssl.SSLError(str(e))
if raw:
responseCls = self.rawResponseCls
kwargs = {'connection': self,
'response': self.connection.getresponse()}
else:
responseCls = self.responseCls
kwargs = {'connection': self,
'response': self.connection.getresponse()}
try:
response = responseCls(**kwargs)
finally:
# Always reset the context after the request has completed
self.reset_context()
return response
def morph_action_hook(self, action):
url = urlparse.urljoin(self.request_path.lstrip('/').rstrip('/') +
'/', action.lstrip('/'))
if not url.startswith('/'):
return '/' + url
else:
return url
def add_default_params(self, params):
"""
Adds default parameters (such as API key, version, etc.)
to the passed `params`
Should return a dictionary.
"""
return params
def add_default_headers(self, headers):
"""
Adds default headers (such as Authorization, X-Foo-Bar)
to the passed `headers`
Should return a dictionary.
"""
return headers
def pre_connect_hook(self, params, headers):
"""
A hook which is called before connecting to the remote server.
This hook can perform a final manipulation on the params, headers and
url parameters.
:type params: ``dict``
:param params: Request parameters.
:type headers: ``dict``
:param headers: Request headers.
"""
return params, headers
def encode_data(self, data):
"""
Encode body data.
Override in a provider's subclass.
"""
return data
def _add_cache_busting_to_params(self, params):
"""
Add cache busting parameter to the query parameters of a GET request.
Parameters are only added if "cache_busting" class attribute is set to
True.
Note: This should only be used with *naughty* providers which use
excessive caching of responses.
"""
cache_busting_value = binascii.hexlify(os.urandom(8)).decode('ascii')
if isinstance(params, dict):
params['cache-busting'] = cache_busting_value
else:
params.append(('cache-busting', cache_busting_value))
return params
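# Illustrative sketch (not part of libcloud): minimal use of Connection.request against
# a hypothetical JSON endpoint. 'api.example.com' and '/v1/status' are placeholders,
# not a real provider API.
def _example_connection_request():
    class ExampleConnection(Connection):
        host = 'api.example.com'
        responseCls = JsonResponse      # parse_body() json-decodes the payload
    conn = ExampleConnection(secure=True)
    response = conn.request(action='/v1/status', params={'verbose': 1}, method='GET')
    return response.object              # parsed JSON document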
class PollingConnection(Connection):
"""
Connection class which can also work with the async APIs.
After initial requests, this class periodically polls for jobs status and
waits until the job has finished.
If the job doesn't finish in timeout seconds, an Exception is thrown.
"""
poll_interval = 0.5
timeout = 200
request_method = 'request'
def async_request(self, action, params=None, data=None, headers=None,
method='GET', context=None):
"""
Perform an 'async' request to the specified path. Keep in mind that
this function is *blocking* and 'async' in this case means that the
hit URL only returns a job ID which is then periodically polled until
the job has completed.
This function works like this:
- Perform a request to the specified path. Response should contain a
'job_id'.
- Returned 'job_id' is then used to construct a URL which is used for
retrieving job status. Constructed URL is then periodically polled
until the response indicates that the job has completed or the
timeout of 'self.timeout' seconds has been reached.
:type action: ``str``
:param action: A path
:type params: ``dict``
:param params: Optional mapping of additional parameters to send. If
None, leave as an empty ``dict``.
:type data: ``unicode``
:param data: A body of data to send with the request.
:type headers: ``dict``
:param headers: Extra headers to add to the request
None, leave as an empty ``dict``.
:type method: ``str``
:param method: An HTTP method such as "GET" or "POST".
:type context: ``dict``
:param context: Context dictionary which is passed to the functions
which construct initial and poll URL.
:return: An :class:`Response` instance.
:rtype: :class:`Response` instance
"""
request = getattr(self, self.request_method)
kwargs = self.get_request_kwargs(action=action, params=params,
data=data, headers=headers,
method=method,
context=context)
response = request(**kwargs)
kwargs = self.get_poll_request_kwargs(response=response,
context=context,
request_kwargs=kwargs)
end = time.time() + self.timeout
completed = False
while time.time() < end and not completed:
response = request(**kwargs)
completed = self.has_completed(response=response)
if not completed:
time.sleep(self.poll_interval)
if not completed:
raise LibcloudError('Job did not complete in %s seconds' %
(self.timeout))
return response
def get_request_kwargs(self, action, params=None, data=None, headers=None,
method='GET', context=None):
"""
Arguments which are passed to the initial request() call inside
async_request.
"""
kwargs = {'action': action, 'params': params, 'data': data,
'headers': headers, 'method': method}
return kwargs
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
Return keyword arguments which are passed to the request() method when
polling for the job status.
:param response: Response object returned by poll request.
:type response: :class:`HTTPResponse`
:param request_kwargs: Kwargs previously used to initiate the
poll request.
:type response: ``dict``
:return ``dict`` Keyword arguments
"""
raise NotImplementedError('get_poll_request_kwargs not implemented')
def has_completed(self, response):
"""
Return job completion status.
:param response: Response object returned by poll request.
:type response: :class:`HTTPResponse`
:return ``bool`` True if the job has completed, False otherwise.
"""
raise NotImplementedError('has_completed not implemented')
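# Illustrative sketch (not part of libcloud): the two hooks a PollingConnection subclass
# must provide. The '/jobs/<id>' URL and the 'job_id'/'status' fields are hypothetical
# stand-ins for whatever the provider's async API returns.
class _ExamplePollingConnection(PollingConnection):
    responseCls = JsonResponse
    def get_poll_request_kwargs(self, response, context, request_kwargs):
        # Build the follow-up request from the job id returned by the initial call
        return {'action': '/jobs/%s' % response.object['job_id'], 'method': 'GET'}
    def has_completed(self, response):
        return response.object.get('status') == 'DONE'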
class ConnectionKey(Connection):
"""
Base connection class which accepts a single ``key`` argument.
"""
def __init__(self, key, secure=True, host=None, port=None, url=None,
timeout=None, proxy_url=None, backoff=None, retry_delay=None):
"""
Initialize `key`; set `secure` to an ``int`` based on
passed value.
"""
super(ConnectionKey, self).__init__(secure=secure, host=host,
port=port, url=url,
timeout=timeout,
proxy_url=proxy_url,
backoff=backoff,
retry_delay=retry_delay)
self.key = key
class CertificateConnection(Connection):
"""
Base connection class which accepts a single ``cert_file`` argument.
"""
def __init__(self, cert_file, secure=True, host=None, port=None, url=None,
proxy_url=None, timeout=None, backoff=None, retry_delay=None):
"""
Initialize `cert_file`; set `secure` to an ``int`` based on
passed value.
"""
super(CertificateConnection, self).__init__(secure=secure, host=host,
port=port, url=url,
timeout=timeout,
backoff=backoff,
retry_delay=retry_delay,
proxy_url=proxy_url)
self.cert_file = cert_file
class KeyCertificateConnection(CertificateConnection):
"""
Base connection class which accepts both ``key_file`` and ``cert_file``
argument.
"""
key_file = None
def __init__(self, key_file, cert_file, secure=True, host=None, port=None,
url=None, proxy_url=None, timeout=None, backoff=None,
retry_delay=None):
"""
Initialize `key_file` and `cert_file`; set `secure` to an ``int`` based on
passed value.
"""
super(KeyCertificateConnection, self).__init__(cert_file,
secure=secure,
host=host,
port=port, url=url,
timeout=timeout,
backoff=backoff,
retry_delay=retry_delay,
proxy_url=proxy_url)
self.key_file = key_file
class ConnectionUserAndKey(ConnectionKey):
"""
Base connection class which accepts a ``user_id`` and ``key`` argument.
"""
user_id = None # type: int
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
backoff=None, retry_delay=None):
super(ConnectionUserAndKey, self).__init__(key, secure=secure,
host=host, port=port,
url=url, timeout=timeout,
backoff=backoff,
retry_delay=retry_delay,
proxy_url=proxy_url)
self.user_id = user_id
class BaseDriver(object):
"""
Base driver class from which other classes can inherit from.
"""
connectionCls = ConnectionKey # type: Type[Connection]
def __init__(self, key, secret=None, secure=True, host=None, port=None,
api_version=None, region=None, **kwargs):
"""
:param key: API key or username to be used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
:param secure: Whether to use HTTPS or HTTP. Note: Some providers
only support HTTPS, and it is on by default.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:param api_version: Optional API version. Only used by drivers
which support multiple API versions.
:type api_version: ``str``
:param region: Optional driver region. Only used by drivers which
support multiple regions.
:type region: ``str``
:rtype: ``None``
"""
self.key = key
self.secret = secret
self.secure = secure
args = [self.key]
if self.secret is not None:
args.append(self.secret)
args.append(secure)
if host is not None:
args.append(host)
if port is not None:
args.append(port)
self.api_version = api_version
self.region = region
conn_kwargs = self._ex_connection_class_kwargs()
conn_kwargs.update({'timeout': kwargs.pop('timeout', None),
'retry_delay': kwargs.pop('retry_delay', None),
'backoff': kwargs.pop('backoff', None),
'proxy_url': kwargs.pop('proxy_url', None)})
self.connection = self.connectionCls(*args, **conn_kwargs)
self.connection.driver = self
self.connection.connect()
def _ex_connection_class_kwargs(self):
"""
Return extra connection keyword arguments which are passed to the
Connection class constructor.
"""
return {}
| []
| []
| [
"LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS"
]
| [] | ["LIBCLOUD_RETRY_FAILED_HTTP_REQUESTS"] | python | 1 | 0 | |
device/device_registration_test.go | package device_test
import (
"fmt"
"github.com/datumchi/protocol-tests/testutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
)
var _ = Describe("Device Registration", func() {
Context("Using Valid DeviceInfo", func() {
identityServiceAddr := fmt.Sprintf("%s:%s", os.Getenv("DATUMCHI_IDENTITY_SERVICE_HOST"), os.Getenv("DATUMCHI_IDENTITY_SERVICE_PORT"))
persona := testutils.EstablishValidStandardHumanPersona(identityServiceAddr, "developer.datumchi.com")
It("Device registers itself with the Identity Service using valid data and authenticates successfully", func(){
Expect(len(persona.AuthTokens)).To(Equal(1))
})
})
})
| [
"\"DATUMCHI_IDENTITY_SERVICE_HOST\"",
"\"DATUMCHI_IDENTITY_SERVICE_PORT\""
]
| []
| [
"DATUMCHI_IDENTITY_SERVICE_PORT",
"DATUMCHI_IDENTITY_SERVICE_HOST"
]
| [] | ["DATUMCHI_IDENTITY_SERVICE_PORT", "DATUMCHI_IDENTITY_SERVICE_HOST"] | go | 2 | 0 | |
functions/ocr/ocr-save-result/src/test/java/functions/OcrSaveResultTest.java | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package functions;
import static com.google.common.truth.Truth.assertThat;
import com.google.cloud.storage.BlobInfo;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;
import com.google.common.testing.TestLogHandler;
import com.google.common.truth.Truth;
import com.google.events.cloud.pubsub.v1.Message;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import java.io.IOException;
import java.util.Base64;
import java.util.List;
import java.util.UUID;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class OcrSaveResultTest {
private static String RESULT_BUCKET = System.getenv("RESULT_BUCKET");
private static final Logger logger = Logger.getLogger(OcrSaveResult.class.getName());
private static final TestLogHandler LOG_HANDLER = new TestLogHandler();
private static final Gson gson = new Gson();
private static final Storage STORAGE = StorageOptions.getDefaultInstance().getService();
private static final String RANDOM_STRING = UUID.randomUUID().toString();
@BeforeClass
public static void setUpClass() {
logger.addHandler(LOG_HANDLER);
}
@After
public void afterTest() {
LOG_HANDLER.clear();
}
@AfterClass
public static void tearDownClass() {
String deletedFilename = String.format("test-%s.jpg_to_es.txt", RANDOM_STRING);
STORAGE.delete(RESULT_BUCKET, deletedFilename);
}
@Test(expected = IllegalArgumentException.class)
public void functionsOcrSave_shouldValidateParams() throws IOException {
Message message = new Message();
message.setData(new String(Base64.getEncoder().encode("{}".getBytes())));
new OcrSaveResult().accept(message, null);
}
@Test
public void functionsOcrSave_shouldPublishTranslatedText() throws IOException {
String text = "Wake up human!";
String filename = String.format("test-%s.jpg", RANDOM_STRING);
String lang = "es";
JsonObject dataJson = new JsonObject();
dataJson.addProperty("text", text);
dataJson.addProperty("filename", filename);
dataJson.addProperty("lang", lang);
Message message = new Message();
message.setData(new String(Base64.getEncoder().encode(gson.toJson(dataJson).getBytes())));
new OcrSaveResult().accept(message, null);
String resultFilename = filename + "_to_es.txt";
// Check log messages
List<LogRecord> logs = LOG_HANDLER.getStoredLogRecords();
String expectedMessage = String.format(
"Saving result to %s in bucket %s", resultFilename, RESULT_BUCKET);
Truth.assertThat(LOG_HANDLER.getStoredLogRecords().get(1).getMessage()).isEqualTo(
expectedMessage);
// Check that file was written
BlobInfo resultBlob = STORAGE.get(RESULT_BUCKET, resultFilename);
assertThat(resultBlob).isNotNull();
}
}
| [
"\"RESULT_BUCKET\""
]
| []
| [
"RESULT_BUCKET"
]
| [] | ["RESULT_BUCKET"] | java | 1 | 0 | |
mcc/templates.py | # -*- coding: utf-8 -*-
"""Create and Manage Launch Templates"""
import os
import time
import boto3
import botocore
def create_security_group(security_groups=None, ips=None, ports=None, rules=None, ec2=boto3.resource("ec2")):
"Create Security Group"
vpc_id = ec2.meta.client.describe_vpcs().get('Vpcs', [{}])[0].get('VpcId', '')
kwargs = {"Description": f"default-sg for VPC {vpc_id}",
"GroupName": f"{vpc_id}_default", "VpcId": vpc_id}
try:
security_group = ec2.create_security_group(**kwargs)
security_group_id = security_group.id
if rules is None:
rules = []
if ports is not None and (ips is not None or security_groups is not None):
for port, protocol in ports.items():
base_rule = dict(IpProtocol=protocol, FromPort=port, ToPort=port)
if ips is not None:
ip_rule = base_rule
ip_rule["IpRanges"] = [{"CidrIp": ip, "Description": name} for name, ip in ips.items()]
rules.append(ip_rule)
if security_groups is not None:
sg_rule = base_rule
sg_rule["UserIdGroupPairs"] = [{"GroupId": sg, "Description": name} for name, sg in security_groups.items()]
rules.append(sg_rule)
base_rule["UserIdGroupPairs"] = [{"GroupId": security_group_id, "Description": "Default Security Group"}]
rules.append(base_rule)
response = ec2.meta.client.authorize_security_group_ingress(GroupId=security_group_id, IpPermissions=rules)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidGroup.Duplicate":
security_group_id = ec2.meta.client.describe_security_groups(GroupNames=[kwargs["GroupName"]])["SecurityGroups"][0]["GroupId"]
else:
raise e
return security_group_id
def create_key_pair(keyname="aws_default_key", ec2=boto3.resource("ec2")):
"Creates Key Pair"
try:
response = ec2.meta.client.create_key_pair(KeyName=keyname)
with open(os.path.join(os.environ["HOME"], ".ssh", f"{keyname}.pem"), "w") as f:
f.write(response["KeyMaterial"])
os.chmod(os.path.join(os.environ["HOME"], ".ssh", f"{keyname}.pem"), 0o600)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidKeyPair.Duplicate":
pass
else:
raise e
return keyname
def build_template_userdata(access_key, secret_key, region):
"Create UserData script for instance prep"
with open("template_script.sh", "r") as f:
launch_script = f.read()
# Load Additional Python Requirements
with open("requirements.txt", "r") as f:
py_reqs = " ".join(f.read().split("\n"))
template_options = {r"{py_ver}": "3.7",
r"{py_reqs}": py_reqs,
r"{aws_access_key}": access_key,
r"{aws_secret_key}": secret_key,
r"{aws_region}":region}
for key, value in template_options.items():
launch_script = launch_script.replace(key, value)
return launch_script
def create_custom_image(security_group_id, keyname, launch_script="", default_ami="ami-0ff8a91507f77f867", instance_type="t2.micro", ec2=boto3.resource("ec2")):
"Creates custom ec2 ami"
launch_options = {"ImageId": default_ami, "SecurityGroupIds": [security_group_id], "UserData": launch_script,
"MinCount": 1, "MaxCount": 1, "KeyName": keyname, "InstanceType": instance_type,
"InstanceInitiatedShutdownBehavior": "stop"}
base_instance = ec2.create_instances(**launch_options)[0]
base_instance.wait_until_running()
userdata_running = True
while userdata_running:
instance_details = ec2.meta.client.describe_instances(InstanceIds=[base_instance.id])["Reservations"][0]["Instances"][0]
for tag in instance_details.get("Tags", []):
if tag["Key"] == "UserData" and tag["Value"] == "complete":
userdata_running = False
time.sleep(5)
try:
response = ec2.meta.client.create_image(InstanceId=base_instance.id, Name="default_ami", Description="default custom image")
custom_ami_id = response["ImageId"]
ami_status = ec2.meta.client.describe_images(ImageIds=[custom_ami_id])["Images"][0]["State"]
while ami_status == "pending":
time.sleep(5)
ami_status = ec2.meta.client.describe_images(ImageIds=[custom_ami_id])["Images"][0]["State"]
if ami_status == "available":
base_instance.terminate()
elif ami_status == "failed":
raise Exception("Creation of AMI failed")
else:
print(f"Warning: AMI {custom_ami_id} has status {ami_status}")
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidAMIName.Duplicate":
pass
else:
raise e
return custom_ami_id
def create_launch_template(custom_ami_id, security_group_id, ec2=boto3.resource("ec2")):
"Creates launch template"
try:
response = ec2.meta.client.create_launch_template(LaunchTemplateName="default_template", LaunchTemplateData=dict(ImageId=custom_ami_id, SecurityGroupIds=[security_group_id], KeyName="aws_default_key"))
template_id = response["LaunchTemplate"]["LaunchTemplateId"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidLaunchTemplateName.AlreadyExistsException":
pass
else:
raise e
return template_id
def create_redis_server(security_group_id, name="redis-default-cache", nodes=1, instance_type="cache.t2.micro", port=6379, redis_client=boto3.client("elasticache")):
"Creates a ElastiCache Redis Server"
try:
response = redis_client.create_cache_cluster(CacheClusterId=name,
AZMode="single-az",
NumCacheNodes=nodes,
CacheNodeType=instance_type,
Engine="redis",
SecurityGroupIds=[security_group_id],
Port=port)
status = response["CacheCluster"]["CacheClusterStatus"]
while status != "available":
time.sleep(5)
response = redis_client.describe_cache_clusters(CacheClusterId=name, ShowCacheNodeInfo=True)
status = response["CacheClusters"][0]["CacheClusterStatus"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] != "CacheClusterAlreadyExists":
raise e
response = redis_client.describe_cache_clusters(CacheClusterId=name, ShowCacheNodeInfo=True)
endpoint = response["CacheClusters"][0]["CacheNodes"][0]["EndPoint"]
return name, endpoint, port
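# Illustrative sketch: one way to compose the helpers above into a full launch-template
# build. Credentials and region are caller-supplied placeholders, and
# build_template_userdata still expects template_script.sh and requirements.txt on disk.
def _example_build_default_template(access_key, secret_key, region="us-east-1"):
    keyname = create_key_pair()                          # writes ~/.ssh/aws_default_key.pem
    security_group_id = create_security_group(ports={22: "tcp"},
                                               ips={"anywhere": "0.0.0.0/0"})
    launch_script = build_template_userdata(access_key, secret_key, region)
    custom_ami_id = create_custom_image(security_group_id, keyname,
                                        launch_script=launch_script)
    return create_launch_template(custom_ami_id, security_group_id)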
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
great_expectations/cli/toolkit.py | import json
import logging
import os
import subprocess
import sys
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import click
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.cli.batch_request import get_batch_request
from great_expectations.cli.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.pretty_printing import cli_colorize_string, cli_message
from great_expectations.cli.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.core.batch import BatchRequest
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
)
from great_expectations.datasource import BaseDatasource
from great_expectations.validator.validator import Validator
logger = logging.getLogger(__name__)
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api"
"</cyan>.\n"
)
def prompt_profile_to_create_a_suite(
data_context: DataContext,
expectation_suite_name: str,
) -> None:
cli_message(
string="""
Great Expectations will create a notebook, containing code cells that select from available columns in your dataset and
generate expectations about them to demonstrate some examples of assertions you can make about your data.
When you run this notebook, Great Expectations will store these expectations in a new Expectation Suite "{:s}" here:
{:s}
""".format(
expectation_suite_name,
data_context.stores[
data_context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
confirm_proceed_or_exit()
def get_or_create_expectation_suite(
expectation_suite_name: str,
data_context: DataContext,
data_asset_name: Optional[str] = None,
usage_event: Optional[str] = None,
suppress_usage_message: bool = False,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
create_if_not_exist: bool = True,
) -> ExpectationSuite:
if expectation_suite_name is None:
default_expectation_suite_name: str = get_default_expectation_suite_name(
data_asset_name=data_asset_name,
batch_request=batch_request,
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if (
expectation_suite_name
not in data_context.list_expectation_suite_names()
):
break
tell_user_suite_exists(
data_context=data_context,
expectation_suite_name=expectation_suite_name,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
)
elif expectation_suite_name in data_context.list_expectation_suite_names():
tell_user_suite_exists(
data_context=data_context,
expectation_suite_name=expectation_suite_name,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
)
suite: ExpectationSuite = load_expectation_suite(
data_context=data_context,
expectation_suite_name=expectation_suite_name,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
create_if_not_exist=create_if_not_exist,
)
return suite
def get_default_expectation_suite_name(
data_asset_name: str,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
) -> str:
suite_name: str
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif batch_request:
suite_name = f"batch-{BatchRequest(**batch_request).id}"
else:
suite_name = "warning"
return suite_name
def tell_user_suite_exists(
data_context: DataContext,
expectation_suite_name: str,
usage_event: str,
suppress_usage_message: bool = False,
) -> None:
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"""<red>An expectation suite named `{expectation_suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`.""",
)
def launch_jupyter_notebook(notebook_path: str) -> None:
jupyter_command_override: Optional[str] = os.getenv("GE_JUPYTER_CMD", None)
if jupyter_command_override:
subprocess.call(f"{jupyter_command_override} {notebook_path}", shell=True)
else:
subprocess.call(["jupyter", "notebook", notebook_path])
def get_validator(
context: DataContext,
batch_request: Union[dict, BatchRequest],
suite: Union[str, ExpectationSuite],
) -> Validator:
assert isinstance(
suite, (str, ExpectationSuite)
), "Invalid suite type (must be ExpectationSuite) or a string."
if isinstance(batch_request, dict):
batch_request = BatchRequest(**batch_request)
validator: Validator
if isinstance(suite, str):
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name=suite
)
else:
validator = context.get_validator(
batch_request=batch_request, expectation_suite=suite
)
return validator
def load_expectation_suite(
data_context: DataContext,
expectation_suite_name: str,
usage_event: str,
suppress_usage_message: bool = False,
create_if_not_exist: bool = True,
) -> Optional[ExpectationSuite]:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param data_context:
:param expectation_suite_name:
:param usage_event:
:param suppress_usage_message:
:param create_if_not_exist:
"""
if expectation_suite_name.endswith(".json"):
expectation_suite_name = expectation_suite_name[:-5]
suite: Optional[ExpectationSuite]
try:
suite = data_context.get_expectation_suite(
expectation_suite_name=expectation_suite_name
)
return suite
except ge_exceptions.DataContextError:
if create_if_not_exist:
suite = data_context.create_expectation_suite(
expectation_suite_name=expectation_suite_name
)
return suite
else:
suite = None
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>Could not find a suite named `{expectation_suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
def exit_with_failure_message_and_stats(
data_context: DataContext,
usage_event: str,
suppress_usage_message: bool = False,
message: Optional[str] = None,
) -> None:
if message:
cli_message(string=message)
if not suppress_usage_message:
send_usage_message(
data_context=data_context,
event=usage_event,
success=False,
)
sys.exit(1)
def delete_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
assume_yes: bool,
) -> None:
"""Delete a Checkpoint or raise helpful errors."""
validate_checkpoint(
context=context,
checkpoint_name=checkpoint_name,
usage_event=usage_event,
)
confirm_prompt: str = f"""\nAre you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?"
"""
continuation_message: str = (
f'The Checkpoint "{checkpoint_name}" was not deleted. Exiting now.'
)
if not assume_yes:
confirm_proceed_or_exit(
confirm_prompt=confirm_prompt,
continuation_message=continuation_message,
data_context=context,
usage_stats_event=usage_event,
)
context.delete_checkpoint(name=checkpoint_name)
def run_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> CheckpointResult:
"""Run a Checkpoint or raise helpful errors."""
failure_message: str = "Exception occurred while running Checkpoint."
validate_checkpoint(
context=context,
checkpoint_name=checkpoint_name,
usage_event=usage_event,
failure_message=failure_message,
)
try:
result: CheckpointResult = context.run_checkpoint(
checkpoint_name=checkpoint_name
)
return result
except ge_exceptions.CheckpointError as e:
cli_message(string=failure_message)
exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event,
message=f"<red>{e}.</red>",
)
def validate_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
failure_message: Optional[str] = None,
) -> None:
try:
# noinspection PyUnusedLocal
checkpoint: Union[Checkpoint, LegacyCheckpoint] = load_checkpoint(
context=context, checkpoint_name=checkpoint_name, usage_event=usage_event
)
except ge_exceptions.CheckpointError as e:
if failure_message:
cli_message(string=failure_message)
exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event,
message=f"<red>{e}.</red>",
)
def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a Checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event,
message=f"""\
<red>Could not find Checkpoint `{checkpoint_name}` (or its configuration is invalid).</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your Checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new Checkpoint""",
)
def select_datasource(
context: DataContext, datasource_name: str = None
) -> BaseDatasource:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source: Optional[BaseDatasource] = None
if datasource_name is None:
data_sources: List[BaseDatasource] = cast(
List[BaseDatasource],
list(
sorted(
context.datasources.values(), key=lambda x: (len(x.name), x.name)
),
),
)
if len(data_sources) == 0:
cli_message(
string="<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0].name
else:
choices: str = "\n".join(
[
f" {i}. {data_source.name}"
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection: str = click.prompt(
"Select a datasource" + "\n" + choices + "\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1].name
if datasource_name is not None:
data_source = context.get_datasource(datasource_name=datasource_name)
return data_source
def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> Optional[DataContext]:
"""Return a DataContext with good error handling and exit codes."""
context: Optional[DataContext]
ge_config_version: float
try:
directory = directory or DataContext.find_context_root_dir()
context = DataContext(context_root_dir=directory)
ge_config_version = context.get_config().config_version
if from_cli_upgrade_command:
if ge_config_version < CURRENT_GE_CONFIG_VERSION:
context = upgrade_project_one_or_multiple_versions_increment(
directory=directory,
context=context,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
elif ge_config_version > CURRENT_GE_CONFIG_VERSION:
raise ge_exceptions.UnsupportedConfigVersionError(
f"""Invalid config version ({ge_config_version}).\n The maximum valid version is \
{CURRENT_GE_CONFIG_VERSION}.
"""
)
else:
context = upgrade_project_zero_versions_increment(
directory=directory,
context=context,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
context = upgrade_project_strictly_multiple_versions_increment(
directory=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if context:
return context
else:
cli_message(string=f"<red>{err.message}</red>")
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message(string=f"<red>{err.message}</red>")
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(string=err.cli_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(string=err.cli_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(string=f"<red>{str(err)}</red>")
sys.exit(1)
def upgrade_project_strictly_multiple_versions_increment(
directory: str, ge_config_version: float, from_cli_upgrade_command: bool = False
) -> Optional[DataContext]:
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
context: Optional[DataContext]
if upgrade_helper_class and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
context = DataContext(context_root_dir=directory)
# noinspection PyBroadException
try:
send_usage_message(
data_context=context,
event="cli.project.upgrade.end",
success=True,
)
except Exception:
# Don't fail for usage stats
pass
else:
context = None
return context
def upgrade_project(
context_root_dir: str,
ge_config_version: float,
from_cli_upgrade_command: bool = False,
) -> None:
if from_cli_upgrade_command:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
)
else:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
f"your project must be upgraded.</red>"
)
cli_message(string=message)
upgrade_prompt = (
"\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
)
# This loading of DataContext is optional and just to track if someone exits here.
# noinspection PyBroadException
try:
data_context = DataContext(context_root_dir)
except Exception:
# Do not raise error for usage stats
data_context = None
confirm_proceed_or_exit(
confirm_prompt=upgrade_prompt,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
data_context=data_context,
usage_stats_event="cli.project.upgrade.end",
)
cli_message(string=SECTION_SEPARATOR)
# use loop in case multiple upgrades need to take place
while int(ge_config_version) < CURRENT_GE_CONFIG_VERSION:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=context_root_dir,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
update_version=True,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or not increment_version:
break
ge_config_version += 1.0
cli_message(string=SECTION_SEPARATOR)
upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>
- Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
- When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{ge_config_version + 1.0}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api</cyan>
"""
if int(ge_config_version) < CURRENT_GE_CONFIG_VERSION:
cli_message(string=upgrade_incomplete_message)
else:
cli_message(upgrade_success_message)
# noinspection PyBroadException
try:
data_context: DataContext = DataContext(context_root_dir=context_root_dir)
send_usage_message(
data_context=data_context,
event="cli.project.upgrade.end",
success=True,
)
except Exception:
# Do not raise error for usage stats
pass
sys.exit(0)
def upgrade_project_one_or_multiple_versions_increment(
directory: str,
context: DataContext,
ge_config_version: float,
from_cli_upgrade_command: bool = False,
) -> Optional[DataContext]:
# noinspection PyBroadException
try:
send_usage_message(
data_context=context,
event="cli.project.upgrade.begin",
success=True,
)
except Exception:
# Don't fail for usage stats
pass
upgrade_successful: bool = False
if (CURRENT_GE_CONFIG_VERSION - int(ge_config_version)) == 1:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
update_version=True,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
upgrade_successful = True
else:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
upgrade_successful = True
if upgrade_successful:
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class:
upgrade_helper = upgrade_helper_class(
context_root_dir=directory, update_version=False
)
else:
error_message: str = f"The upgrade utility for version {ge_config_version} could not be found."
cli_message(string=f"<red>{error_message}</red>")
sys.exit(1)
manual_steps_required = upgrade_helper.manual_steps_required()
if manual_steps_required:
upgrade_message = "Your project requires manual upgrade steps in order to be up-to-date.\n"
cli_message(f"<yellow>{upgrade_message}</yellow>")
else:
upgrade_message = (
"Your project is up-to-date - no further upgrade is necessary.\n"
)
cli_message(f"<green>{upgrade_message}</green>")
context = DataContext(context_root_dir=directory)
# noinspection PyBroadException
try:
send_usage_message(
data_context=context,
event="cli.project.upgrade.end",
success=True,
)
except Exception:
# Don't fail for usage stats
pass
else:
context = None
return context
def upgrade_project_zero_versions_increment(
directory: str,
context: DataContext,
ge_config_version: float,
from_cli_upgrade_command: bool = False,
) -> Optional[DataContext]:
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class:
upgrade_helper = upgrade_helper_class(
context_root_dir=directory, update_version=False
)
else:
error_message: str = (
f"The upgrade utility for version {ge_config_version} could not be found."
)
cli_message(string=f"<red>{error_message}</red>")
sys.exit(1)
manual_steps_required = upgrade_helper.manual_steps_required()
if manual_steps_required:
# noinspection PyBroadException
try:
send_usage_message(
data_context=context,
event="cli.project.upgrade.begin",
success=True,
)
except Exception:
# Don't fail for usage stats
pass
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
update_version=False,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or increment_version:
context = None
else:
if manual_steps_required:
upgrade_message = "Your project requires manual upgrade steps in order to be up-to-date.\n"
cli_message(f"<yellow>{upgrade_message}</yellow>")
else:
upgrade_message = (
"Your project is up-to-date - no further upgrade is necessary.\n"
)
cli_message(f"<green>{upgrade_message}</green>")
context = DataContext(context_root_dir=directory)
# noinspection PyBroadException
try:
send_usage_message(
data_context=context,
event="cli.project.upgrade.end",
success=True,
)
except Exception:
# Don't fail for usage stats
pass
return context
def upgrade_project_up_to_one_version_increment(
context_root_dir: str,
ge_config_version: float,
continuation_message: str,
update_version: bool,
from_cli_upgrade_command: bool = False,
) -> Tuple[bool, bool]: # Returns increment_version, exception_occurred
upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if not upgrade_helper_class:
return False, False
# set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
DataContext.set_ge_config_version(
config_version=CURRENT_GE_CONFIG_VERSION,
context_root_dir=context_root_dir,
)
upgrade_helper = upgrade_helper_class(
context_root_dir=context_root_dir, update_version=update_version
)
manual_steps_required = upgrade_helper.manual_steps_required()
if not (update_version or manual_steps_required):
return False, False
upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
if from_cli_upgrade_command and confirmation_required:
upgrade_confirmed = confirm_proceed_or_exit(
confirm_prompt=upgrade_overview,
continuation_message=continuation_message,
exit_on_no=False,
)
else:
upgrade_confirmed = True
if upgrade_confirmed:
if confirmation_required:
cli_message(string="\nUpgrading project...")
cli_message(string=SECTION_SEPARATOR)
# run upgrade and get report of what was done, if version number should be incremented
(
upgrade_report,
increment_version,
exception_occurred,
) = upgrade_helper.upgrade_project()
# display report to user
cli_message(string=upgrade_report)
if exception_occurred:
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
# display report to user
return False, True
# set config version to target version
if increment_version:
DataContext.set_ge_config_version(
int(ge_config_version) + 1,
context_root_dir,
validate_config_version=False,
)
return True, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
return False, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
cli_message(string=continuation_message)
sys.exit(0)
def confirm_proceed_or_exit(
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
data_context: Optional[DataContext] = None,
usage_stats_event: Optional[str] = None,
) -> bool:
"""
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you saw one command, you know what to expect from all others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
"""
confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
continuation_message_colorized = cli_colorize_string(continuation_message)
if not click.confirm(confirm_prompt_colorized, default=True):
if exit_on_no:
            cli_message(string=continuation_message_colorized)
if (usage_stats_event is not None) and (data_context is not None):
# noinspection PyBroadException
try:
send_usage_message(
data_context=data_context,
event=usage_stats_event,
event_payload={"cancelled": True},
success=True,
)
except Exception as e:
# Don't fail on usage stats
logger.debug(f"Something went wrong when sending usage stats: {e}")
pass
sys.exit(exit_code)
else:
return False
return True
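# Illustrative sketch only (not part of the original toolkit): a hypothetical CLI
# command following the pattern documented above would first describe the pending
# change, then gate the work on this helper before modifying any resources, e.g.:
#
#     cli_message(string="This will modify great_expectations.yml")
#     if not confirm_proceed_or_exit(exit_on_no=False):
#         ...  # perform any cleanup, then return/exit from the calling command
#     ...  # proceed with the modification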
def parse_cli_config_file_location(config_file_location: str) -> dict:
"""
Parse CLI yaml config file or directory location into directory and filename.
Uses pathlib to handle windows paths.
Args:
config_file_location: string of config_file_location
Returns:
{
"directory": "directory/where/config/file/is/located",
"filename": "great_expectations.yml"
}
"""
if config_file_location is not None and config_file_location != "":
config_file_location_path = Path(config_file_location)
# If the file or directory exists, treat it appropriately
# This handles files without extensions
filename: Optional[str]
directory: Optional[str]
if config_file_location_path.is_file():
filename = rf"{str(config_file_location_path.name)}"
directory = rf"{str(config_file_location_path.parent)}"
elif config_file_location_path.is_dir():
filename = None
directory = config_file_location
else:
raise ge_exceptions.ConfigNotFoundError()
else:
# Return None if config_file_location is empty rather than default output of ""
directory = None
filename = None
return {"directory": directory, "filename": filename}
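# Worked example (illustrative paths, assuming the first one exists as a file on disk):
#     parse_cli_config_file_location("/projects/demo/great_expectations.yml")
#         -> {"directory": "/projects/demo", "filename": "great_expectations.yml"}
#     parse_cli_config_file_location("")
#         -> {"directory": None, "filename": None}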
def is_cloud_file_url(file_path: str) -> bool:
"""Check for commonly used cloud urls."""
sanitized = file_path.strip()
if sanitized[0:7] == "file://":
return False
if (
sanitized[0:5] in ["s3://", "gs://"]
or sanitized[0:6] == "ftp://"
or sanitized[0:7] in ["http://", "wasb://"]
or sanitized[0:8] == "https://"
):
return True
return False
def get_relative_path_from_config_file_to_base_path(
context_root_directory: str, data_path: str
) -> str:
"""
This function determines the relative path from a given data path relative
to the great_expectations.yml file independent of the current working
directory.
This allows a user to use the CLI from any directory, type a relative path
from their current working directory and have the correct relative path be
put in the great_expectations.yml file.
"""
data_from_working_dir = os.path.relpath(data_path)
context_dir_from_working_dir = os.path.relpath(context_root_directory)
return os.path.relpath(data_from_working_dir, context_dir_from_working_dir)
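# Worked example (illustrative): with the current working directory at /home/user,
# context_root_directory="/home/user/project/great_expectations" and
# data_path="/home/user/project/data" become "project/great_expectations" and
# "project/data" relative to the working directory, so the path written to
# great_expectations.yml is "../data".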
def load_json_file_into_dict(
filepath: str,
data_context: DataContext,
usage_event: Optional[str] = None,
) -> Optional[Dict[str, Union[str, int, Dict[str, Any]]]]:
suppress_usage_message: bool = (usage_event is None) or (data_context is None)
error_message: str
if not filepath:
error_message = "The path to a JSON file was not specified."
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>{error_message}</red>",
)
if not filepath.endswith(".json"):
error_message = f'The JSON file path "{filepath}" does not have the ".json" extension in the file name.'
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>{error_message}</red>",
)
contents: Optional[str] = None
try:
with open(filepath) as json_file:
contents = json_file.read()
except FileNotFoundError:
error_message = f'The JSON file with the path "{filepath}" could not be found.'
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>{error_message}</red>",
)
batch_request: Optional[Dict[str, Union[str, int, Dict[str, Any]]]] = None
if contents:
try:
batch_request = json.loads(contents)
except JSONDecodeError as jde:
error_message = f"""Error "{jde}" occurred while attempting to load the JSON file with the path
"{filepath}" into dictionary.
"""
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>{error_message}</red>",
)
else:
error_message = f'The JSON file path "{filepath}" is empty.'
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>{error_message}</red>",
)
return batch_request
def get_batch_request_from_citations(
expectation_suite: Optional[ExpectationSuite] = None,
) -> Optional[Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]]:
batch_request_from_citation: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None
if expectation_suite is not None:
citations: List[Dict[str, Any]] = expectation_suite.get_citations(
require_batch_request=True
)
if citations:
citation: Dict[str, Any] = citations[-1]
batch_request_from_citation = citation.get("batch_request")
return batch_request_from_citation
def add_citation_with_batch_request(
data_context: DataContext,
expectation_suite: ExpectationSuite,
batch_request: Optional[Dict[str, Union[str, int, Dict[str, Any]]]] = None,
) -> None:
if (
expectation_suite is not None
and batch_request
and isinstance(batch_request, dict)
and BatchRequest(**batch_request)
):
expectation_suite.add_citation(
comment="Created suite added via CLI",
batch_request=batch_request,
)
data_context.save_expectation_suite(expectation_suite=expectation_suite)
def get_batch_request_from_json_file(
batch_request_json_file_path: str,
data_context: DataContext,
usage_event: Optional[str] = None,
suppress_usage_message: bool = False,
) -> Optional[Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]]:
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = load_json_file_into_dict(
filepath=batch_request_json_file_path,
data_context=data_context,
usage_event=usage_event,
)
try:
batch_request = BatchRequest(**batch_request).to_json_dict()
except TypeError as e:
cli_message(
string="<red>Please check that your batch_request is valid and is able to load a batch.</red>"
)
cli_message(string=f"<red>{e}</red>")
if not suppress_usage_message:
send_usage_message(
data_context=data_context,
event=usage_event,
success=False,
)
sys.exit(1)
return batch_request
def get_batch_request_using_datasource_name(
data_context: DataContext,
datasource_name: Optional[str] = None,
usage_event: Optional[str] = None,
suppress_usage_message: bool = False,
additional_batch_request_args: Optional[
Dict[str, Union[str, int, Dict[str, Any]]]
] = None,
) -> Optional[Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]]:
cli_message(
string="\nA batch of data is required to edit the suite - let's help you to specify it.\n"
)
datasource: BaseDatasource = select_datasource(
context=data_context, datasource_name=datasource_name
)
if not datasource:
cli_message(string="<red>No datasources found in the context.</red>")
if not suppress_usage_message:
send_usage_message(
data_context=data_context,
event=usage_event,
success=False,
)
sys.exit(1)
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = get_batch_request(
datasource=datasource,
additional_batch_request_args=additional_batch_request_args,
)
return batch_request
| []
| []
| [
"GE_JUPYTER_CMD"
]
| [] | ["GE_JUPYTER_CMD"] | python | 1 | 0 | |
cmd/protecodeExecuteScan_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type protecodeExecuteScanOptions struct {
ExcludeCVEs string `json:"excludeCVEs,omitempty"`
FailOnSevereVulnerabilities bool `json:"failOnSevereVulnerabilities,omitempty"`
ScanImage string `json:"scanImage,omitempty"`
DockerRegistryURL string `json:"dockerRegistryUrl,omitempty"`
CleanupMode string `json:"cleanupMode,omitempty"`
FilePath string `json:"filePath,omitempty"`
IncludeLayers bool `json:"includeLayers,omitempty"`
AddSideBarLink bool `json:"addSideBarLink,omitempty"`
TimeoutMinutes string `json:"timeoutMinutes,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
ReportFileName string `json:"reportFileName,omitempty"`
FetchURL string `json:"fetchUrl,omitempty"`
Group string `json:"group,omitempty"`
ReuseExisting bool `json:"reuseExisting,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
ArtifactVersion string `json:"artifactVersion,omitempty"`
PullRequestName string `json:"pullRequestName,omitempty"`
}
type protecodeExecuteScanInflux struct {
protecode_data struct {
fields struct {
historical_vulnerabilities string
triaged_vulnerabilities string
excluded_vulnerabilities string
minor_vulnerabilities string
major_vulnerabilities string
vulnerabilities string
}
tags struct {
}
}
}
func (i *protecodeExecuteScanInflux) persist(path, resourceName string) {
measurementContent := []struct {
measurement string
valType string
name string
value string
}{
{valType: config.InfluxField, measurement: "protecode_data", name: "historical_vulnerabilities", value: i.protecode_data.fields.historical_vulnerabilities},
{valType: config.InfluxField, measurement: "protecode_data", name: "triaged_vulnerabilities", value: i.protecode_data.fields.triaged_vulnerabilities},
{valType: config.InfluxField, measurement: "protecode_data", name: "excluded_vulnerabilities", value: i.protecode_data.fields.excluded_vulnerabilities},
{valType: config.InfluxField, measurement: "protecode_data", name: "minor_vulnerabilities", value: i.protecode_data.fields.minor_vulnerabilities},
{valType: config.InfluxField, measurement: "protecode_data", name: "major_vulnerabilities", value: i.protecode_data.fields.major_vulnerabilities},
{valType: config.InfluxField, measurement: "protecode_data", name: "vulnerabilities", value: i.protecode_data.fields.vulnerabilities},
}
errCount := 0
for _, metric := range measurementContent {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(metric.measurement, fmt.Sprintf("%vs", metric.valType), metric.name), metric.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting influx environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Influx environment")
}
}
// ProtecodeExecuteScanCommand Protecode is an Open Source Vulnerability Scanner that is capable of scanning binaries. It can be used to scan docker images but it supports many other programming languages especially those of the C family.
func ProtecodeExecuteScanCommand() *cobra.Command {
metadata := protecodeExecuteScanMetadata()
var stepConfig protecodeExecuteScanOptions
var startTime time.Time
var influx protecodeExecuteScanInflux
var createProtecodeExecuteScanCmd = &cobra.Command{
Use: "protecodeExecuteScan",
		Short: "Protecode is an Open Source Vulnerability Scanner that is capable of scanning binaries. It can be used to scan docker images but it supports many other programming languages especially those of the C family.",
		Long: `Protecode is an Open Source Vulnerability Scanner that is capable of scanning binaries. It can be used to scan docker images but it supports many other programming languages especially those of the C family.
!!! hint "Auditing findings (Triaging)"
Triaging is now supported by the Protecode backend and also Piper does consider this information during the analysis of the scan results though product versions are not supported by Protecode. Therefore please make sure that the ` + "`" + `fileName` + "`" + ` you are providing does either contain a stable version or that it does not contain one at all. By ensuring that you are able to triage CVEs globally on the upload file's name without affecting any other artifacts scanned in the same Protecode group and as such triaged vulnerabilities will be considered during the next scan and will not fail the build anymore.`,
PreRunE: func(cmd *cobra.Command, args []string) error {
startTime = time.Now()
log.SetStepName("protecodeExecuteScan")
log.SetVerbose(GeneralConfig.Verbose)
err := PrepareConfig(cmd, &metadata, "protecodeExecuteScan", &stepConfig, config.OpenPiperFile)
if err != nil {
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
return nil
},
Run: func(cmd *cobra.Command, args []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
influx.persist(GeneralConfig.EnvRootPath, "influx")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, "protecodeExecuteScan")
protecodeExecuteScan(stepConfig, &telemetryData, &influx)
telemetryData.ErrorCode = "0"
},
}
addProtecodeExecuteScanFlags(createProtecodeExecuteScanCmd, &stepConfig)
return createProtecodeExecuteScanCmd
}
func addProtecodeExecuteScanFlags(cmd *cobra.Command, stepConfig *protecodeExecuteScanOptions) {
cmd.Flags().StringVar(&stepConfig.ExcludeCVEs, "excludeCVEs", "[]", "DEPRECATED: Do use triaging within the Protecode UI instead")
	cmd.Flags().BoolVar(&stepConfig.FailOnSevereVulnerabilities, "failOnSevereVulnerabilities", true, "Whether to fail the job on severe vulnerabilities or not")
cmd.Flags().StringVar(&stepConfig.ScanImage, "scanImage", os.Getenv("PIPER_scanImage"), "The reference to the docker image to scan with Protecode")
cmd.Flags().StringVar(&stepConfig.DockerRegistryURL, "dockerRegistryUrl", os.Getenv("PIPER_dockerRegistryUrl"), "The reference to the docker registry to scan with Protecode")
cmd.Flags().StringVar(&stepConfig.CleanupMode, "cleanupMode", "binary", "Decides which parts are removed from the Protecode backend after the scan")
cmd.Flags().StringVar(&stepConfig.FilePath, "filePath", os.Getenv("PIPER_filePath"), "The path to the file from local workspace to scan with Protecode")
cmd.Flags().BoolVar(&stepConfig.IncludeLayers, "includeLayers", false, "Flag if the docker layers should be included")
cmd.Flags().BoolVar(&stepConfig.AddSideBarLink, "addSideBarLink", true, "Whether to create a side bar link pointing to the report produced by Protecode or not")
cmd.Flags().StringVar(&stepConfig.TimeoutMinutes, "timeoutMinutes", "60", "The timeout to wait for the scan to finish")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", os.Getenv("PIPER_serverUrl"), "The URL to the Protecode backend")
cmd.Flags().StringVar(&stepConfig.ReportFileName, "reportFileName", "protecode_report.pdf", "The file name of the report to be created")
cmd.Flags().StringVar(&stepConfig.FetchURL, "fetchUrl", os.Getenv("PIPER_fetchUrl"), "The URL to fetch the file to scan with Protecode which must be accessible via public HTTP GET request")
cmd.Flags().StringVar(&stepConfig.Group, "group", os.Getenv("PIPER_group"), "The Protecode group ID of your team")
cmd.Flags().BoolVar(&stepConfig.ReuseExisting, "reuseExisting", false, "Whether to reuse an existing product instead of creating a new one")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User which is used for the protecode scan")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password which is used for the user")
cmd.Flags().StringVar(&stepConfig.ArtifactVersion, "artifactVersion", os.Getenv("PIPER_artifactVersion"), "The version of the artifact to allow identification in protecode backend")
cmd.Flags().StringVar(&stepConfig.PullRequestName, "pullRequestName", os.Getenv("PIPER_pullRequestName"), "The name of the pull request")
cmd.MarkFlagRequired("serverUrl")
cmd.MarkFlagRequired("group")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
}
// retrieve step metadata
func protecodeExecuteScanMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "protecodeExecuteScan",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "excludeCVEs",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "protecodeExcludeCVEs"}},
},
{
Name: "failOnSevereVulnerabilities",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "protecodeFailOnSevereVulnerabilities"}},
},
{
Name: "scanImage",
ResourceRef: []config.ResourceReference{{Name: "commonPipelineEnvironment", Param: "container/imageNameTag"}},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "dockerImage"}},
},
{
Name: "dockerRegistryUrl",
ResourceRef: []config.ResourceReference{{Name: "commonPipelineEnvironment", Param: "container/registryUrl"}},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "cleanupMode",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "filePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "includeLayers",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "addSideBarLink",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "timeoutMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "protecodeTimeoutMinutes"}},
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "protecodeServerUrl"}},
},
{
Name: "reportFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "fetchUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "group",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "protecodeGroup"}},
},
{
Name: "reuseExisting",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "user"}},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "artifactVersion",
ResourceRef: []config.ResourceReference{{Name: "commonPipelineEnvironment", Param: "artifactVersion"}},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "pullRequestName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_scanImage\"",
"\"PIPER_dockerRegistryUrl\"",
"\"PIPER_filePath\"",
"\"PIPER_serverUrl\"",
"\"PIPER_fetchUrl\"",
"\"PIPER_group\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_artifactVersion\"",
"\"PIPER_pullRequestName\""
]
| []
| [
"PIPER_group",
"PIPER_password",
"PIPER_username",
"PIPER_scanImage",
"PIPER_artifactVersion",
"PIPER_dockerRegistryUrl",
"PIPER_serverUrl",
"PIPER_fetchUrl",
"PIPER_pullRequestName",
"PIPER_filePath"
]
| [] | ["PIPER_group", "PIPER_password", "PIPER_username", "PIPER_scanImage", "PIPER_artifactVersion", "PIPER_dockerRegistryUrl", "PIPER_serverUrl", "PIPER_fetchUrl", "PIPER_pullRequestName", "PIPER_filePath"] | go | 10 | 0 | |
maven_bom/deps.bzl | load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
def maven_bom_rules_dependencies():
maybe(
java_import_external,
name = "bom_versions_extractor",
jar_urls = [
"https://github.com/slamdev/bom-versions-extractor/releases/download/0.0.2/bom-versions-extractor.jar",
],
jar_sha256 = "008da9f3508a4d85d687229f8c8be369d4d4dcd382f4f6929453de2ccccf5238",
)
def maven_bom_import(boms, repos):
_maven_bom_import(
name = "maven_bom",
boms = boms,
repos = repos,
debug = True,
)
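# Illustrative usage sketch (hypothetical WORKSPACE snippet; the BOM coordinate and
# repository URL below are examples, not requirements of this rule):
#
#     load("//maven_bom:deps.bzl", "maven_bom_rules_dependencies", "maven_bom_import")
#     maven_bom_rules_dependencies()
#     maven_bom_import(
#         boms = ["org.springframework.boot:spring-boot-dependencies:2.5.0"],
#         repos = ["https://repo1.maven.org/maven2"],
#     )
#
# The resolved versions are then available from the generated repository:
#     load("@maven_bom//:defs.bzl", "MAVEN_BOMS")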
def _impl(ctx):
jar_path = ctx.path(ctx.attr._cli)
java_home = ctx.os.environ.get("JAVA_HOME")
if java_home != None:
java = ctx.path(java_home + "/bin/java")
cmd = [java, "-jar", jar_path]
elif ctx.which("java") != None:
# Use 'java' from $PATH
cmd = [ctx.which("java"), "-jar", jar_path]
else:
cmd = [jar_path]
for r in ctx.attr.repos:
cmd += ["-r", r]
for b in ctx.attr.boms:
cmd += ["-b", b]
cmd += ["-f", ctx.path("versions.json")]
cmd += ["-c", ctx.path("cache")]
exec_result = ctx.execute(
cmd,
quiet = not ctx.attr.debug,
)
if exec_result.return_code != 0:
fail("Unable to run bom-versions-extractor: " + exec_result.stderr)
artifacts = json.decode(ctx.read("versions.json"))
version_defs = []
for a in artifacts:
version_defs.append(
"""
"{group}:{name}":"{group}:{name}:{version}",
""".format(group = a["group"], name = a["name"], version = a["version"]),
)
ctx.file("BUILD")
ctx.file("defs.bzl", content = """
MAVEN_BOMS = {
%s
}
""" % "".join(version_defs))
_maven_bom_import = repository_rule(
_impl,
attrs = {
"boms": attr.string_list(),
"repos": attr.string_list(),
"debug": attr.bool(),
"_cli": attr.label(default = "@bom_versions_extractor//:bom-versions-extractor.jar"),
},
environ = [
"JAVA_HOME",
],
    doc = ("Repository rule that runs the bom-versions-extractor CLI for the given BOM coordinates and Maven repositories and writes the resolved artifact versions into a generated defs.bzl as the MAVEN_BOMS dict."),
)
| []
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | python | 1 | 0 | |
_vendor/src/camlistore.org/pkg/env/env.go | /*
Copyright 2015 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package env detects what sort of environment Camlistore is running in.
package env
import (
"os"
"strconv"
"sync"
"google.golang.org/cloud/compute/metadata"
)
// IsDebug reports whether this is a debug environment.
func IsDebug() bool {
return isDebug
}
// DebugUploads reports whether this is a debug environment for uploads.
func DebugUploads() bool {
return isDebugUploads
}
// IsDev reports whether this is a development server environment (devcam server).
func IsDev() bool {
return isDev
}
// OnGCE reports whether this process is running in a Google Compute
// Engine (GCE) environment. This only returns true if the
// "camlistore-config-dir" instance metadata value is defined.
// Instances running in custom configs on GCE will be unaffected.
func OnGCE() bool {
gceOnce.Do(detectGCE)
return isGCE
}
var (
gceOnce sync.Once
isGCE bool
)
func detectGCE() {
if !metadata.OnGCE() {
return
}
v, _ := metadata.InstanceAttributeValue("camlistore-config-dir")
isGCE = v != ""
}
var (
isDev = os.Getenv("CAMLI_DEV_CAMLI_ROOT") != ""
isDebug, _ = strconv.ParseBool(os.Getenv("CAMLI_DEBUG"))
isDebugUploads, _ = strconv.ParseBool(os.Getenv("CAMLI_DEBUG_UPLOADS"))
)
| [
"\"CAMLI_DEV_CAMLI_ROOT\"",
"\"CAMLI_DEBUG\"",
"\"CAMLI_DEBUG_UPLOADS\""
]
| []
| [
"CAMLI_DEV_CAMLI_ROOT",
"CAMLI_DEBUG",
"CAMLI_DEBUG_UPLOADS"
]
| [] | ["CAMLI_DEV_CAMLI_ROOT", "CAMLI_DEBUG", "CAMLI_DEBUG_UPLOADS"] | go | 3 | 0 | |
setup.py | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
]
NAME = 'pyscf'
MAINTAINER = 'Qiming Sun'
MAINTAINER_EMAIL = '[email protected]'
DESCRIPTION = 'PySCF: Python-based Simulations of Chemistry Framework'
#LONG_DESCRIPTION = ''
URL = 'http://www.pyscf.org'
DOWNLOAD_URL = 'http://github.com/pyscf/pyscf'
LICENSE = 'Apache License 2.0'
AUTHOR = 'Qiming Sun'
AUTHOR_EMAIL = '[email protected]'
PLATFORMS = ['Linux', 'Mac OS-X', 'Unix']
def get_version():
topdir = os.path.abspath(os.path.join(__file__, '..'))
with open(os.path.join(topdir, 'pyscf', '__init__.py'), 'r') as f:
for line in f.readlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise ValueError("Version string not found")
VERSION = get_version()
EXTRAS = {
'geomopt': ['pyberny>=0.6.2', 'geometric>=0.9.7.2'],
'dftd3': ['pyscf-dftd3'],
'dmrgscf': ['pyscf-dmrgscf'],
'doci': ['pyscf-doci'],
'icmpspt': ['pyscf-icmpspt'],
    'properties': ['pyscf-properties'],
'semiempirical': ['pyscf-semiempirical'],
'shciscf': ['pyscf-shciscf'],
'cppe': ['cppe'],
}
EXTRAS['all'] = [p for extras in EXTRAS.values() for p in extras]
# extras which should not be installed by "all" components
EXTRAS['cornell_shci'] = ['pyscf-cornell-shci']
EXTRAS['nao'] = ['pyscf-nao']
EXTRAS['fciqmcscf'] = ['pyscf-fciqmc']
EXTRAS['tblis'] = ['pyscf-tblis']
class CMakeBuildExt(build_ext):
def run(self):
extension = self.extensions[0]
assert extension.name == 'pyscf_lib_placeholder'
self.build_cmake(extension)
def build_cmake(self, extension):
self.announce('Configuring extensions', level=3)
src_dir = os.path.abspath(os.path.join(__file__, '..', 'pyscf', 'lib'))
cmd = ['cmake', f'-S{src_dir}', f'-B{self.build_temp}']
configure_args = os.getenv('CMAKE_CONFIGURE_ARGS')
if configure_args:
cmd.extend(configure_args.split(' '))
self.spawn(cmd)
self.announce('Building binaries', level=3)
cmd = ['cmake', '--build', self.build_temp, '-j']
build_args = os.getenv('CMAKE_BUILD_ARGS')
if build_args:
cmd.extend(build_args.split(' '))
if self.dry_run:
self.announce(' '.join(cmd))
else:
self.spawn(cmd)
# To remove the infix string like cpython-37m-x86_64-linux-gnu.so
# Python ABI updates since 3.5
# https://www.python.org/dev/peps/pep-3149/
def get_ext_filename(self, ext_name):
ext_path = ext_name.split('.')
filename = build_ext.get_ext_filename(self, ext_name)
name, ext_suffix = os.path.splitext(filename)
return os.path.join(*ext_path) + ext_suffix
# Here to change the order of sub_commands to ['build_py', ..., 'build_ext']
# C extensions by build_ext are installed in source directory.
# build_py then copy all .so files into "build_ext.build_lib" directory.
# We have to ensure build_ext being executed earlier than build_py.
# A temporary workaround is to modifying the order of sub_commands in build class
from distutils.command.build import build
build.sub_commands = ([c for c in build.sub_commands if c[0] == 'build_ext'] +
[c for c in build.sub_commands if c[0] != 'build_ext'])
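# Illustrative note (the only interfaces assumed here are the two env vars read in
# CMakeBuildExt.build_cmake): extra flags can be forwarded to the cmake configure and
# build steps via the environment, e.g.
#     CMAKE_CONFIGURE_ARGS="-DEXAMPLE_PLACEHOLDER_FLAG=ON" CMAKE_BUILD_ARGS="--verbose" pip install -e .
# The -DEXAMPLE_PLACEHOLDER_FLAG value is a placeholder, not a real build option.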
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
#package_dir={'pyscf': 'pyscf'}, # packages are under directory pyscf
#include *.so *.dat files. They are now placed in MANIFEST.in
#package_data={'': ['*.so', '*.dylib', '*.dll', '*.dat']},
include_package_data=True, # include everything in source control
packages=find_packages(exclude=['*test*', '*examples*']),
# The ext_modules placeholder is to ensure build_ext getting initialized
ext_modules=[Extension('pyscf_lib_placeholder', [])],
cmdclass={'build_ext': CMakeBuildExt},
install_requires=['numpy>1.8,!=1.16,!=1.17',
'scipy!=1.5.0,!=1.5.1',
'h5py>=2.6'],
extras_require=EXTRAS,
)
| []
| []
| [
"CMAKE_BUILD_ARGS",
"CMAKE_CONFIGURE_ARGS"
]
| [] | ["CMAKE_BUILD_ARGS", "CMAKE_CONFIGURE_ARGS"] | python | 2 | 0 | |
configs/configs.go | // Package configs sets up the environment. First it sets a number of default envs, then looks in the $HOME/ghorg/conf.yaml to overwrite the defaults. These values will be superseded by any command line flags used
package configs
import (
"errors"
"fmt"
"log"
"os"
"os/exec"
"reflect"
"strings"
"github.com/gabrie30/ghorg/colorlog"
"github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
var (
// ErrNoGitHubToken error message when token is not found
ErrNoGitHubToken = errors.New("Could not find a valid github token. GHORG_GITHUB_TOKEN or (--token, -t) flag must be set. Create a personal access token, then set it in your $HOME/ghorg/conf.yaml or use the (--token, -t) flag...For best results read the troubleshooting section of README.md https://github.com/gabrie30/ghorg to properly store your token in the osx keychain")
// ErrNoGitLabToken error message when token is not found
ErrNoGitLabToken = errors.New("Could not find a valid gitlab token. GHORG_GITLAB_TOKEN or (--token, -t) flag must be set. Create a token from gitlab then set it in your $HOME/ghorg/conf.yaml or use the (--token, -t) flag...For best results read the troubleshooting section of README.md https://github.com/gabrie30/ghorg to properly store your token in the osx keychain")
// ErrNoBitbucketUsername error message when no username found
ErrNoBitbucketUsername = errors.New("Could not find bitbucket username. GHORG_BITBUCKET_USERNAME or (--bitbucket-username) must be set to clone repos from bitbucket, see 'BitBucket Setup' in README.md")
// ErrNoBitbucketAppPassword error message when no app password found
ErrNoBitbucketAppPassword = errors.New("Could not find a valid bitbucket app password. GHORG_BITBUCKET_APP_PASSWORD or (--token, -t) must be set to clone repos from bitbucket, see 'BitBucket Setup' in README.md")
// ErrIncorrectScmType indicates an unsupported scm type being used
ErrIncorrectScmType = errors.New("GHORG_SCM_TYPE or --scm must be one of github, gitlab, or bitbucket")
// ErrIncorrectCloneType indicates an unsupported clone type being used
ErrIncorrectCloneType = errors.New("GHORG_CLONE_TYPE or --clone-type must be one of org or user")
// ErrIncorrectProtocolType indicates an unsupported protocol type being used
ErrIncorrectProtocolType = errors.New("GHORG_CLONE_PROTOCOL or --protocol must be one of https or ssh")
)
func init() {
initConfig()
}
func initConfig() {
viper.SetConfigType("yaml")
viper.AddConfigPath(GhorgDir())
viper.SetConfigName("conf")
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
// Config file not found; ignore error if desired
fmt.Println(err)
fmt.Println("Could not find $HOME/ghorg/conf.yaml file, please add one")
} else {
// Config file was found but another error was produced
fmt.Println(err)
fmt.Println("Something unexpected happened")
}
}
getOrSetDefaults("GHORG_ABSOLUTE_PATH_TO_CLONE_TO")
getOrSetDefaults("GHORG_BRANCH")
getOrSetDefaults("GHORG_CLONE_PROTOCOL")
getOrSetDefaults("GHORG_CLONE_TYPE")
getOrSetDefaults("GHORG_SCM_TYPE")
getOrSetDefaults("GHORG_GITLAB_DEFAULT_NAMESPACE")
getOrSetDefaults("GHORG_COLOR")
getOrSetDefaults("GHORG_SKIP_ARCHIVED")
getOrSetDefaults("GHORG_BACKUP")
getOrSetDefaults("GHORG_CONCURRENCY")
// Optionally set
getOrSetDefaults("GHORG_GITHUB_TOKEN")
getOrSetDefaults("GHORG_GITLAB_TOKEN")
getOrSetDefaults("GHORG_BITBUCKET_USERNAME")
getOrSetDefaults("GHORG_BITBUCKET_APP_PASSWORD")
getOrSetDefaults("GHORG_SCM_BASE_URL")
getOrSetDefaults("GHORG_PRESERVE_DIRECTORY_STRUCTURE")
}
// Load triggers the configs to load first, not sure if this is actually needed
func Load() {}
// GetRequiredString verifies env is set
func GetRequiredString(key string) string {
value := viper.GetString(key)
if isZero(value) {
log.Fatalf("Fatal: '%s' ENV VAR is required", key)
}
return value
}
func isZero(value interface{}) bool {
return value == reflect.Zero(reflect.TypeOf(value)).Interface()
}
func getOrSetDefaults(envVar string) {
	// When a user does not set a value in $HOME/ghorg/conf.yaml, set the default values; otherwise set the env to what they have added to the file.
if viper.GetString(envVar) == "" {
switch envVar {
case "GHORG_ABSOLUTE_PATH_TO_CLONE_TO":
os.Setenv(envVar, HomeDir()+"/Desktop/")
case "GHORG_BRANCH":
os.Setenv(envVar, "master")
case "GHORG_CLONE_PROTOCOL":
os.Setenv(envVar, "https")
case "GHORG_CLONE_TYPE":
os.Setenv(envVar, "org")
case "GHORG_SCM_TYPE":
os.Setenv(envVar, "github")
case "GHORG_GITLAB_DEFAULT_NAMESPACE":
os.Setenv(envVar, "unset")
case "GHORG_COLOR":
os.Setenv(envVar, "on")
case "GHORG_SKIP_ARCHIVED":
os.Setenv(envVar, "false")
case "GHORG_BACKUP":
os.Setenv(envVar, "false")
case "GHORG_PRESERVE_DIRECTORY_STRUCTURE":
os.Setenv(envVar, "false")
case "GHORG_CONCURRENCY":
os.Setenv(envVar, "25")
}
} else {
// User forgot to put a / at the end of path, so we will add for them
if envVar == "GHORG_ABSOLUTE_PATH_TO_CLONE_TO" && !strings.HasSuffix(viper.GetString(envVar), "/") {
os.Setenv(envVar, viper.GetString(envVar)+"/")
} else {
os.Setenv(envVar, viper.GetString(envVar))
}
}
}
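// Illustrative only: a $HOME/ghorg/conf.yaml that overrides a few of the defaults set
// above could look like the following (keys mirror the env var names handled in
// getOrSetDefaults; the values shown are placeholders, not recommendations):
//
//	GHORG_CLONE_PROTOCOL: ssh
//	GHORG_BRANCH: develop
//	GHORG_CONCURRENCY: 10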
// GhorgIgnoreLocation returns the path of the user's ghorgignore
func GhorgIgnoreLocation() string {
return GhorgDir() + "/ghorgignore"
}
// GhorgDir returns the ghorg directory path
func GhorgDir() string {
return HomeDir() + "/ghorg"
}
// HomeDir finds the user's home directory
func HomeDir() string {
home, err := homedir.Dir()
if err != nil {
log.Fatal("Error trying to find users home directory")
}
return home
}
// GetOrSetToken will set token based on scm
func GetOrSetToken() {
switch os.Getenv("GHORG_SCM_TYPE") {
case "github":
getOrSetGitHubToken()
case "gitlab":
getOrSetGitLabToken()
case "bitbucket":
getOrSetBitBucketToken()
}
}
func getOrSetGitHubToken() {
var token string
if isZero(os.Getenv("GHORG_GITHUB_TOKEN")) || len(os.Getenv("GHORG_GITHUB_TOKEN")) != 40 {
cmd := `security find-internet-password -s github.com | grep "acct" | awk -F\" '{ print $4 }'`
out, err := exec.Command("bash", "-c", cmd).Output()
if err != nil {
colorlog.PrintError(fmt.Sprintf("Failed to execute command: %s", cmd))
}
token = strings.TrimSuffix(string(out), "\n")
os.Setenv("GHORG_GITHUB_TOKEN", token)
}
}
func getOrSetGitLabToken() {
var token string
if isZero(os.Getenv("GHORG_GITLAB_TOKEN")) || len(os.Getenv("GHORG_GITLAB_TOKEN")) != 20 {
cmd := `security find-internet-password -s gitlab.com | grep "acct" | awk -F\" '{ print $4 }'`
out, err := exec.Command("bash", "-c", cmd).Output()
if err != nil {
colorlog.PrintError(fmt.Sprintf("Failed to execute command: %s", cmd))
}
token = strings.TrimSuffix(string(out), "\n")
os.Setenv("GHORG_GITLAB_TOKEN", token)
}
}
func getOrSetBitBucketToken() {
var token string
if isZero(os.Getenv("GHORG_BITBUCKET_APP_PASSWORD")) || len(os.Getenv("GHORG_BITBUCKET_APP_PASSWORD")) != 20 {
cmd := `security find-internet-password -s bitbucket.com | grep "acct" | awk -F\" '{ print $4 }'`
out, err := exec.Command("bash", "-c", cmd).Output()
if err != nil {
colorlog.PrintError(fmt.Sprintf("Failed to execute command: %s", cmd))
}
token = strings.TrimSuffix(string(out), "\n")
os.Setenv("GHORG_BITBUCKET_APP_PASSWORD", token)
}
}
// VerifyTokenSet checks to make sure env is set for the correct scm provider
func VerifyTokenSet() error {
var tokenLength int
var token string
scmProvider := os.Getenv("GHORG_SCM_TYPE")
if scmProvider == "github" {
tokenLength = 40
token = os.Getenv("GHORG_GITHUB_TOKEN")
}
if scmProvider == "gitlab" {
tokenLength = 20
token = os.Getenv("GHORG_GITLAB_TOKEN")
}
if scmProvider == "bitbucket" {
tokenLength = 20
token = os.Getenv("GHORG_BITBUCKET_APP_PASSWORD")
if os.Getenv("GHORG_BITBUCKET_USERNAME") == "" {
return ErrNoBitbucketUsername
}
}
if len(token) != tokenLength {
if scmProvider == "github" {
return ErrNoGitHubToken
}
if scmProvider == "gitlab" {
return ErrNoGitLabToken
}
if scmProvider == "bitbucket" {
return ErrNoBitbucketAppPassword
}
}
return nil
}
// VerifyConfigsSetCorrectly makes sure flags are set to appropriate values
func VerifyConfigsSetCorrectly() error {
scmType := os.Getenv("GHORG_SCM_TYPE")
cloneType := os.Getenv("GHORG_CLONE_TYPE")
protocol := os.Getenv("GHORG_CLONE_PROTOCOL")
if scmType != "github" && scmType != "gitlab" && scmType != "bitbucket" {
return ErrIncorrectScmType
}
if cloneType != "user" && cloneType != "org" {
return ErrIncorrectCloneType
}
if protocol != "ssh" && protocol != "https" {
return ErrIncorrectProtocolType
}
return nil
}
| [
"\"GHORG_SCM_TYPE\"",
"\"GHORG_GITHUB_TOKEN\"",
"\"GHORG_GITHUB_TOKEN\"",
"\"GHORG_GITLAB_TOKEN\"",
"\"GHORG_GITLAB_TOKEN\"",
"\"GHORG_BITBUCKET_APP_PASSWORD\"",
"\"GHORG_BITBUCKET_APP_PASSWORD\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_GITHUB_TOKEN\"",
"\"GHORG_GITLAB_TOKEN\"",
"\"GHORG_BITBUCKET_APP_PASSWORD\"",
"\"GHORG_BITBUCKET_USERNAME\"",
"\"GHORG_SCM_TYPE\"",
"\"GHORG_CLONE_TYPE\"",
"\"GHORG_CLONE_PROTOCOL\""
]
| []
| [
"GHORG_BITBUCKET_USERNAME",
"GHORG_GITLAB_TOKEN",
"GHORG_CLONE_PROTOCOL",
"GHORG_CLONE_TYPE",
"GHORG_SCM_TYPE",
"GHORG_GITHUB_TOKEN",
"GHORG_BITBUCKET_APP_PASSWORD"
]
| [] | ["GHORG_BITBUCKET_USERNAME", "GHORG_GITLAB_TOKEN", "GHORG_CLONE_PROTOCOL", "GHORG_CLONE_TYPE", "GHORG_SCM_TYPE", "GHORG_GITHUB_TOKEN", "GHORG_BITBUCKET_APP_PASSWORD"] | go | 7 | 0 | |
examples/photosets/main.go | package main
import (
"fmt"
"os"
"github.com/reedwade/flickr"
"github.com/reedwade/flickr/photosets"
)
func main() {
// retrieve Flickr credentials from env vars
apik := os.Getenv("FLICKRGO_API_KEY")
apisec := os.Getenv("FLICKRGO_API_SECRET")
token := os.Getenv("FLICKRGO_OAUTH_TOKEN")
tokenSecret := os.Getenv("FLICKRGO_OAUTH_TOKEN_SECRET")
nsid := os.Getenv("FLICKRGO_USER_ID")
// do not proceed if credentials were not provided
if apik == "" || apisec == "" || token == "" || tokenSecret == "" {
fmt.Fprintln(os.Stderr, "Please set FLICKRGO_API_KEY, FLICKRGO_API_SECRET "+
"and FLICKRGO_OAUTH_TOKEN, FLICKRGO_OAUTH_TOKEN_SECRET env vars")
os.Exit(1)
}
// create an API client with credentials
client := flickr.NewFlickrClient(apik, apisec)
client.OAuthToken = token
client.OAuthTokenSecret = tokenSecret
client.Id = nsid
/*
response, _ := photosets.GetList(client, false, "23148015@N00", 1)
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.GetPhotos(client, false, "72157632076344815", "23148015@N00", 1)
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.EditMeta(client, "72157654143356943", "bar", "Baz")
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.EditPhotos(client, "72157654143356943", "9518691684", []string{"9518691684", "19681581995"})
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.RemovePhotos(client, "72157654143356943", []string{"9518691684", "19681581995"})
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.SetPrimaryPhoto(client, "72157656097802609", "16438207896")
fmt.Println(fmt.Sprintf("%+v", *response))
response, _ := photosets.OrderSets(client, []string{"72157656097802609"})
fmt.Println(fmt.Sprintf("%+v", *response))
*/
response, _ := photosets.GetInfo(client, true, "72157656097802609", "")
fmt.Println(response.Set.Title)
}
| [
"\"FLICKRGO_API_KEY\"",
"\"FLICKRGO_API_SECRET\"",
"\"FLICKRGO_OAUTH_TOKEN\"",
"\"FLICKRGO_OAUTH_TOKEN_SECRET\"",
"\"FLICKRGO_USER_ID\""
]
| []
| [
"FLICKRGO_OAUTH_TOKEN_SECRET",
"FLICKRGO_API_KEY",
"FLICKRGO_OAUTH_TOKEN",
"FLICKRGO_API_SECRET",
"FLICKRGO_USER_ID"
]
| [] | ["FLICKRGO_OAUTH_TOKEN_SECRET", "FLICKRGO_API_KEY", "FLICKRGO_OAUTH_TOKEN", "FLICKRGO_API_SECRET", "FLICKRGO_USER_ID"] | go | 5 | 0 | |
tests/system/providers/microsoft/azure/example_adf_run_pipeline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from datetime import datetime, timedelta
from airflow.models import DAG
try:
from airflow.operators.empty import EmptyOperator
except ModuleNotFoundError:
from airflow.operators.dummy import DummyOperator as EmptyOperator # type: ignore
from airflow.providers.microsoft.azure.operators.data_factory import AzureDataFactoryRunPipelineOperator
from airflow.providers.microsoft.azure.sensors.data_factory import AzureDataFactoryPipelineRunStatusSensor
from airflow.utils.edgemodifier import Label
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_adf_run_pipeline"
with DAG(
dag_id=DAG_ID,
start_date=datetime(2021, 8, 13),
schedule_interval="@daily",
catchup=False,
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=3),
"azure_data_factory_conn_id": "azure_data_factory",
"factory_name": "my-data-factory", # This can also be specified in the ADF connection.
"resource_group_name": "my-resource-group", # This can also be specified in the ADF connection.
},
default_view="graph",
) as dag:
begin = EmptyOperator(task_id="begin")
end = EmptyOperator(task_id="end")
# [START howto_operator_adf_run_pipeline]
run_pipeline1 = AzureDataFactoryRunPipelineOperator(
task_id="run_pipeline1",
pipeline_name="pipeline1",
parameters={"myParam": "value"},
)
# [END howto_operator_adf_run_pipeline]
# [START howto_operator_adf_run_pipeline_async]
run_pipeline2 = AzureDataFactoryRunPipelineOperator(
task_id="run_pipeline2",
pipeline_name="pipeline2",
wait_for_termination=False,
)
pipeline_run_sensor = AzureDataFactoryPipelineRunStatusSensor(
task_id="pipeline_run_sensor",
run_id=run_pipeline2.output["run_id"],
)
# [END howto_operator_adf_run_pipeline_async]
begin >> Label("No async wait") >> run_pipeline1
begin >> Label("Do async wait with sensor") >> run_pipeline2
[run_pipeline1, pipeline_run_sensor] >> end
# Task dependency created via `XComArgs`:
# run_pipeline2 >> pipeline_run_sensor
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| []
| []
| [
"SYSTEM_TESTS_ENV_ID"
]
| [] | ["SYSTEM_TESTS_ENV_ID"] | python | 1 | 0 | |
pkg/csource/csource_test.go | // Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package csource
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
_ "github.com/google/syzkaller/sys"
"github.com/google/syzkaller/sys/targets"
)
func TestGenerate(t *testing.T) {
t.Parallel()
checked := make(map[string]bool)
for _, target := range prog.AllTargets() {
target := target
sysTarget := targets.Get(target.OS, target.Arch)
if runtime.GOOS != sysTarget.BuildOS {
continue
}
t.Run(target.OS+"/"+target.Arch, func(t *testing.T) {
if target.OS == "linux" && target.Arch == "arm64" {
// Episodically fails on travis with:
// collect2: error: ld terminated with signal 11 [Segmentation fault]
t.Skip("broken")
}
if target.OS == "test" && target.PtrSize == 4 {
// The same reason as linux/32.
t.Skip("broken")
}
if _, err := exec.LookPath(sysTarget.CCompiler); err != nil {
t.Skipf("no target compiler %v", sysTarget.CCompiler)
}
bin, err := Build(target, []byte(`
#include <stdio.h>
int main() { printf("Hello, World!\n"); }
`))
if err != nil {
t.Skipf("target compiler is broken: %v", err)
}
os.Remove(bin)
full := !checked[target.OS]
checked[target.OS] = true
t.Parallel()
testTarget(t, target, full)
})
}
}
// This is the main configuration used by executor, so we want to test it as well.
var executorOpts = Options{
Threaded: true,
Collide: true,
Repeat: true,
Procs: 2,
Sandbox: "none",
Repro: true,
UseTmpDir: true,
}
func testTarget(t *testing.T, target *prog.Target, full bool) {
seed := time.Now().UnixNano()
if os.Getenv("TRAVIS") != "" {
seed = 0 // required for deterministic coverage reports
}
rs := rand.NewSource(seed)
t.Logf("seed=%v", seed)
p := target.Generate(rs, 10, nil)
	// Turns out that a fully minimized program can trigger new interesting warnings,
// e.g. about NULL arguments for functions that require non-NULL arguments in syz_ functions.
// We could append both AllSyzProg as-is and a minimized version of it,
// but this makes the NULL argument warnings go away (they showed up in ".constprop" versions).
// Testing 2 programs takes too long since we have lots of options permutations and OS/arch.
// So we use the as-is in short tests and minimized version in full tests.
syzProg := target.GenerateAllSyzProg(rs)
var opts []Options
if !full || testing.Short() {
p.Calls = append(p.Calls, syzProg.Calls...)
opts = allOptionsSingle(target.OS)
opts = append(opts, executorOpts)
} else {
minimized, _ := prog.Minimize(syzProg, -1, false, func(p *prog.Prog, call int) bool {
return len(p.Calls) == len(syzProg.Calls)
})
p.Calls = append(p.Calls, minimized.Calls...)
opts = allOptionsPermutations(target.OS)
}
for opti, opts := range opts {
opts := opts
t.Run(fmt.Sprintf("%v", opti), func(t *testing.T) {
t.Parallel()
testOne(t, p, opts)
})
}
}
func testOne(t *testing.T, p *prog.Prog, opts Options) {
src, err := Write(p, opts)
if err != nil {
t.Logf("opts: %+v\nprogram:\n%s\n", opts, p.Serialize())
t.Fatalf("%v", err)
}
bin, err := Build(p.Target, src)
if err != nil {
t.Logf("opts: %+v\nprogram:\n%s\n", opts, p.Serialize())
t.Fatalf("%v", err)
}
defer os.Remove(bin)
}
func TestSysTests(t *testing.T) {
t.Parallel()
for _, target := range prog.AllTargets() {
target := target
sysTarget := targets.Get(target.OS, target.Arch)
if runtime.GOOS != sysTarget.BuildOS {
continue // we need at least preprocessor binary to generate sources
}
t.Run(target.OS+"/"+target.Arch, func(t *testing.T) {
t.Parallel()
dir := filepath.Join("..", "..", "sys", target.OS, "test")
if !osutil.IsExist(dir) {
return
}
files, err := ioutil.ReadDir(dir)
if err != nil {
t.Fatalf("failed to read %v: %v", dir, err)
}
for _, finfo := range files {
file := filepath.Join(dir, finfo.Name())
if strings.HasSuffix(file, "~") || strings.HasSuffix(file, ".swp") {
continue
}
data, err := ioutil.ReadFile(file)
if err != nil {
t.Fatalf("failed to read %v: %v", file, err)
}
p, err := target.Deserialize(data, prog.Strict)
if err != nil {
t.Fatalf("failed to parse program %v: %v", file, err)
}
_, err = Write(p, executorOpts)
if err != nil {
t.Fatalf("failed to generate C source for %v: %v", file, err)
}
}
})
}
}
func TestExecutorMacros(t *testing.T) {
// Ensure that executor does not mis-spell any of the SYZ_* macros.
target, _ := prog.GetTarget("test", "64")
p := target.Generate(rand.NewSource(0), 1, nil)
expected := commonDefines(p, Options{})
expected["SYZ_EXECUTOR"] = true
expected["SYZ_HAVE_SETUP_LOOP"] = true
expected["SYZ_HAVE_RESET_LOOP"] = true
expected["SYZ_HAVE_SETUP_TEST"] = true
macros := regexp.MustCompile("SYZ_[A-Za-z0-9_]+").FindAllString(commonHeader, -1)
for _, macro := range macros {
if strings.HasPrefix(macro, "SYZ_HAVE_") {
continue
}
if _, ok := expected[macro]; !ok {
t.Errorf("unexpected macro: %v", macro)
}
}
}
| [
"\"TRAVIS\""
]
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | go | 1 | 0 | |
simclr.py | import argparse
import os
import torch
import numpy as np
from utils.config import create_config
from utils.common_config import get_criterion, get_model, get_train_dataset,\
get_val_dataset, get_train_dataloader,\
get_val_dataloader, get_train_transformations,\
get_val_transformations, get_optimizer,\
adjust_learning_rate
from utils.evaluate_utils import contrastive_evaluate
from utils.memory import MemoryBank
from utils.train_utils import simclr_train
from utils.utils import fill_memory_bank
from termcolor import colored
import copy
# Parser
parser = argparse.ArgumentParser(description='SimCLR')
parser.add_argument('--config_env',
help='Config file for the environment')
parser.add_argument('--config_exp',
help='Config file for the experiment')
parser.add_argument('--cudaid', default=0)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '%s'%(args.cudaid)
#meta_info
meta_info = copy.deepcopy(args.__dict__)
p = create_config(args.config_env, args.config_exp, meta_info)
meta_info['mode'] = 'pretext'
def main():
# Retrieve config file
print(colored(p, 'red'))
# Model
print(colored('Retrieve model', 'blue'))
model = get_model(p)
print('Model is {}'.format(model.__class__.__name__))
print('Model parameters: {:.2f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))
print(model)
model = model.cuda()
# CUDNN
print(colored('Set CuDNN benchmark', 'blue'))
torch.backends.cudnn.benchmark = True
# Dataset
print(colored('Retrieve dataset', 'blue'))
train_transforms = get_train_transformations(p)
print('Train transforms:', train_transforms)
val_transforms = get_val_transformations(p)
print('Validation transforms:', val_transforms)
train_dataset = get_train_dataset(p, train_transforms, to_augmented_dataset=True, to_noisy_dataset=p['to_noisy_dataset'],
split='train+unlabeled', meta_info=meta_info) # Split is for stl-10
val_dataset = get_val_dataset(p, val_transforms, meta_info=meta_info)
train_dataloader = get_train_dataloader(p, train_dataset)
val_dataloader = get_val_dataloader(p, val_dataset)
print('Dataset contains {}/{} train/val samples'.format(len(train_dataset), len(val_dataset)))
# Memory Bank
print(colored('Build MemoryBank', 'blue'))
base_dataset = get_train_dataset(p, val_transforms, to_noisy_dataset=p['to_noisy_dataset'], split='train', meta_info=meta_info) # Dataset w/o augs for knn eval
base_dataloader = get_val_dataloader(p, base_dataset)
memory_bank_base = MemoryBank(len(base_dataset),
p['model_kwargs']['features_dim'],
p['num_classes'], p['criterion_kwargs']['temperature'])
memory_bank_base.cuda()
memory_bank_val = MemoryBank(len(val_dataset),
p['model_kwargs']['features_dim'],
p['num_classes'], p['criterion_kwargs']['temperature'])
memory_bank_val.cuda()
# Criterion
print(colored('Retrieve criterion', 'blue'))
criterion = get_criterion(p)
print('Criterion is {}'.format(criterion.__class__.__name__))
criterion = criterion.cuda()
# Optimizer and scheduler
print(colored('Retrieve optimizer', 'blue'))
optimizer = get_optimizer(p, model)
print(optimizer)
# Checkpoint
if os.path.exists(p['pretext_checkpoint']):
print(colored('Restart from checkpoint {}'.format(p['pretext_checkpoint']), 'blue'))
checkpoint = torch.load(p['pretext_checkpoint'], map_location='cpu')
optimizer.load_state_dict(checkpoint['optimizer'])
model.load_state_dict(checkpoint['model'])
model.cuda()
start_epoch = checkpoint['epoch']
else:
print(colored('No checkpoint file at {}'.format(p['pretext_checkpoint']), 'blue'))
start_epoch = 0
model = model.cuda()
# Training
print(colored('Starting main loop', 'blue'))
for epoch in range(start_epoch, p['epochs']):
print(colored('Epoch %d/%d' %(epoch, p['epochs']), 'yellow'))
print(colored('-'*15, 'yellow'))
# Adjust lr
lr = adjust_learning_rate(p, optimizer, epoch)
print('Adjusted learning rate to {:.5f}'.format(lr))
# Train
print('Train ...')
simclr_train(train_dataloader, model, criterion, optimizer, epoch)
# Fill memory bank
print('Fill memory bank for kNN...')
fill_memory_bank(base_dataloader, model, memory_bank_base)
# Evaluate (To monitor progress - Not for validation)
print('Evaluate ...')
top1 = contrastive_evaluate(val_dataloader, model, memory_bank_base)
print('Result of kNN evaluation is %.2f' %(top1))
# Checkpoint
print('Checkpoint ...')
torch.save({'optimizer': optimizer.state_dict(), 'model': model.state_dict(),
'epoch': epoch + 1}, p['pretext_checkpoint'])
# Save final model
torch.save(model.state_dict(), p['pretext_model'])
# Mine the topk nearest neighbors at the very end (Train)
# These will be served as input to the SCAN loss.
print(colored('Fill memory bank for mining the nearest neighbors (train) ...', 'blue'))
fill_memory_bank(base_dataloader, model, memory_bank_base)
topk = 20
print('Mine the nearest neighbors (Top-%d)' %(topk))
indices, acc = memory_bank_base.mine_nearest_neighbors(topk)
print('Accuracy of top-%d nearest neighbors on train set is %.2f' %(topk, 100*acc))
np.save(p['topk_neighbors_train_path'], indices)
# Mine the topk nearest neighbors at the very end (Val)
# These will be used for validation.
print(colored('Fill memory bank for mining the nearest neighbors (val) ...', 'blue'))
fill_memory_bank(val_dataloader, model, memory_bank_val)
topk = 5
print('Mine the nearest neighbors (Top-%d)' %(topk))
indices, acc = memory_bank_val.mine_nearest_neighbors(topk)
print('Accuracy of top-%d nearest neighbors on val set is %.2f' %(topk, 100*acc))
np.save(p['topk_neighbors_val_path'], indices)
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
main.py | import os
import sys
import argparse
import datetime
import time
import csv
import os.path as osp
import numpy as np
import warnings
import importlib
import pandas as pd
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import torchvision
from datasets import CIFAR10D, CIFAR100D
from utils.utils import AverageMeter, Logger, save_networks, load_networks
from core import train, test, test_robustness
parser = argparse.ArgumentParser("Training")
# dataset
parser.add_argument('--data', type=str, default='./data')
parser.add_argument('--outf', type=str, default='./results')
parser.add_argument('-d', '--dataset', type=str, default='cifar10')
parser.add_argument('--workers', default=8, type=int, help="number of data loading workers (default: 8)")
# optimization
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--lr', type=float, default=0.1, help="learning rate for model")
parser.add_argument('--max-epoch', type=int, default=200)
parser.add_argument('--stepsize', type=int, default=30)
parser.add_argument('--aug', type=str, default='none', help='none, aprs')
# model
parser.add_argument('--model', type=str, default='wider_resnet_28_10')
# misc
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--eval', action='store_true', help="Eval", default=False)
# parameters for generating adversarial examples
parser.add_argument('--epsilon', '-e', type=float, default=0.0157,
help='maximum perturbation of adversaries (4/255=0.0157)')
parser.add_argument('--alpha', '-a', type=float, default=0.00784,
help='movement multiplier per iteration when generating adversarial examples (2/255=0.00784)')
parser.add_argument('--k', '-k', type=int, default=10,
help='maximum iteration when generating adversarial examples')
parser.add_argument('--perturbation_type', '-p', choices=['linf', 'l2'], default='linf',
help='the type of the perturbation (linf or l2)')
args = parser.parse_args()
options = vars(args)
if not os.path.exists(options['outf']):
os.makedirs(options['outf'])
if not os.path.exists(options['data']):
os.makedirs(options['data'])
sys.stdout = Logger(osp.join(options['outf'], 'logs.txt'))
def main():
torch.manual_seed(options['seed'])
os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
use_gpu = torch.cuda.is_available()
if options['use_cpu']: use_gpu = False
options.update({'use_gpu': use_gpu})
if use_gpu:
print("Currently using GPU: {}".format(options['gpu']))
cudnn.benchmark = True
torch.cuda.manual_seed_all(options['seed'])
else:
print("Currently using CPU")
if 'cifar10' == options['dataset']:
Data = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
else:
Data = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
trainloader, testloader, outloader = Data.train_loader, Data.test_loader, OODData.test_loader
num_classes = Data.num_classes
print("Creating model: {}".format(options['model']))
if 'wide_resnet' in options['model']:
print('wide_resnet')
from model.wide_resnet import WideResNet
net = WideResNet(40, num_classes, 2, 0.0)
elif 'allconv' in options['model']:
print('allconv')
from model.allconv import AllConvNet
net = AllConvNet(num_classes)
elif 'densenet' in options['model']:
print('densenet')
from model.densenet import densenet
net = densenet(num_classes=num_classes)
elif 'resnext' in options['model']:
print('resnext29')
from model.resnext import resnext29
net = resnext29(num_classes)
else:
print('resnet18')
from model.resnet import ResNet18
net = ResNet18(num_classes=num_classes)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if use_gpu:
net = nn.DataParallel(net, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
criterion = criterion.cuda()
file_name = '{}_{}_{}'.format(options['model'], options['dataset'], options['aug'])
if options['eval']:
net, criterion = load_networks(net, options['outf'], file_name, criterion=criterion)
outloaders = Data.out_loaders
results = test(net, criterion, testloader, outloader, epoch=0, **options)
acc = results['ACC']
res = dict()
res['ACC'] = dict()
acc_res = []
for key in Data.out_keys:
results = test_robustness(net, criterion, outloaders[key], epoch=0, label=key, **options)
print('{} (%): {:.3f}\t'.format(key, results['ACC']))
res['ACC'][key] = results['ACC']
acc_res.append(results['ACC'])
print('Mean ACC:', np.mean(acc_res))
print('Mean Error:', 100-np.mean(acc_res))
return
params_list = [{'params': net.parameters()},
{'params': criterion.parameters()}]
optimizer = torch.optim.SGD(params_list, lr=options['lr'], momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = lr_scheduler.MultiStepLR(optimizer, gamma=0.2, milestones=[60, 120, 160, 190])
start_time = time.time()
best_acc = 0.0
for epoch in range(options['max_epoch']):
print("==> Epoch {}/{}".format(epoch+1, options['max_epoch']))
train(net, criterion, optimizer, trainloader, epoch=epoch, **options)
if options['eval_freq'] > 0 and (epoch+1) % options['eval_freq'] == 0 or (epoch+1) == options['max_epoch'] or epoch > 160:
print("==> Test")
results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
if best_acc < results['ACC']:
best_acc = results['ACC']
print("Best Acc (%): {:.3f}\t".format(best_acc))
save_networks(net, options['outf'], file_name, criterion=criterion)
scheduler.step()
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
integration/copy_test.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"github.com/containers/image/manifest"
"github.com/containers/image/signature"
"github.com/go-check/check"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/image-tools/image"
)
func init() {
check.Suite(&CopySuite{})
}
const (
v2DockerRegistryURL = "localhost:5555" // Update also policy.json
v2s1DockerRegistryURL = "localhost:5556"
)
type CopySuite struct {
cluster *openshiftCluster
registry *testRegistryV2
s1Registry *testRegistryV2
gpgHome string
}
func (s *CopySuite) SetUpSuite(c *check.C) {
if os.Getenv("SKOPEO_CONTAINER_TESTS") != "1" {
c.Skip("Not running in a container, refusing to affect user state")
}
s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression", "schema1", "schema2"} {
isJSON := fmt.Sprintf(`{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "%s"
},
"spec": {}
}`, stream)
runCommandWithInput(c, isJSON, "oc", "create", "-f", "-")
}
// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false)
s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true)
gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
c.Assert(err, check.IsNil)
s.gpgHome = gpgHome
os.Setenv("GNUPGHOME", s.gpgHome)
for _, key := range []string{"personal", "official"} {
batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %[email protected]\n%%commit\n",
key, key)
runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")
out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%[email protected]", key))
err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
[]byte(out), 0600)
c.Assert(err, check.IsNil)
}
}
func (s *CopySuite) TearDownSuite(c *check.C) {
if s.gpgHome != "" {
os.RemoveAll(s.gpgHome)
}
if s.registry != nil {
s.registry.Close()
}
if s.s1Registry != nil {
s.s1Registry.Close()
}
if s.cluster != nil {
s.cluster.tearDown(c)
}
}
func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
dir, err := ioutil.TempDir("", "copy-manifest-list")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir)
}
func (s *CopySuite) TestCopyFailsWhenImageOSDoesntMatchRuntimeOS(c *check.C) {
c.Skip("can't run this on Travis")
assertSkopeoFails(c, `.*image operating system "windows" cannot be used on "linux".*`, "copy", "docker://microsoft/windowsservercore", "containers-storage:test")
}
func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
dir1, err := ioutil.TempDir("", "copy-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
dir2, err := ioutil.TempDir("", "copy-2")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
// "push": dir: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
// The result of pushing and pulling is an equivalent image, except for schema1 embedded names.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:unsigned")
}
// The most basic (skopeo copy) use:
func (s *CopySuite) TestCopySimple(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
dir1, err := ioutil.TempDir("", "copy-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
dir2, err := ioutil.TempDir("", "copy-2")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
// "push": dir: → docker(v2s2):
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"busybox:unsigned")
// The result of pushing and pulling is an unmodified image.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"busybox:unsigned", "dir:"+dir2)
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
c.Assert(out, check.Equals, "")
// docker v2s2 -> OCI image layout with image name
// ociDest will be created by oci: if it doesn't exist
// so don't create it here to exercise auto-creation
ociDest := "busybox-latest-image"
ociImgName := "busybox"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest+":"+ociImgName)
_, err = os.Stat(ociDest)
c.Assert(err, check.IsNil)
// docker v2s2 -> OCI image layout without image name
ociDest = "busybox-latest-noimage"
defer os.RemoveAll(ociDest)
assertSkopeoFails(c, ".*Error initializing destination oci:busybox-latest-noimage:: cannot save image with empty image.ref.name.*", "copy", "docker://busybox:latest", "oci:"+ociDest)
}
// Check whether dir: images in dir1 and dir2 are equal, ignoring schema1 signatures.
func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
// The manifests may have different JWS signatures; so, compare the manifests by digests, which
// strips the signatures.
digests := []digest.Digest{}
for _, dir := range []string{dir1, dir2} {
manifestPath := filepath.Join(dir, "manifest.json")
m, err := ioutil.ReadFile(manifestPath)
c.Assert(err, check.IsNil)
digest, err := manifest.Digest(m)
c.Assert(err, check.IsNil)
digests = append(digests, digest)
}
c.Assert(digests[0], check.Equals, digests[1])
// Then compare the rest file by file.
out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2)
c.Assert(out, check.Equals, "")
}
// Check whether schema1 dir: images in dir1 and dir2 are equal, ignoring schema1 signatures and the embedded path/tag values, which should have the expected values.
func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref2 string) {
// The manifests may have different JWS signatures and names; so, unmarshal and delete these elements.
manifests := []map[string]interface{}{}
for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} {
manifestPath := filepath.Join(dir, "manifest.json")
m, err := ioutil.ReadFile(manifestPath)
c.Assert(err, check.IsNil)
data := map[string]interface{}{}
err = json.Unmarshal(m, &data)
c.Assert(err, check.IsNil)
c.Assert(data["schemaVersion"], check.Equals, float64(1))
colon := strings.LastIndex(ref, ":")
c.Assert(colon, check.Not(check.Equals), -1)
c.Assert(data["name"], check.Equals, ref[:colon])
c.Assert(data["tag"], check.Equals, ref[colon+1:])
for _, key := range []string{"signatures", "name", "tag"} {
delete(data, key)
}
manifests = append(manifests, data)
}
c.Assert(manifests[0], check.DeepEquals, manifests[1])
// Then compare the rest file by file.
out := combinedOutputOfCommand(c, "diff", "-urN", "-x", "manifest.json", dir1, dir2)
c.Assert(out, check.Equals, "")
}
// Streaming (skopeo copy)
func (s *CopySuite) TestCopyStreaming(c *check.C) {
dir1, err := ioutil.TempDir("", "streaming-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
dir2, err := ioutil.TempDir("", "streaming-2")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// streaming: docker: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
// Compare (copies of) the original and the copy:
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:streaming")
// FIXME: Also check pushing to docker://
}
// OCI round-trip testing. It's very important to make sure that OCI <-> Docker
// conversion works (while skopeo handles many things, one of the most obvious
// benefits of a tool like skopeo is that you can use OCI tooling to create an
// image and then as the final step convert the image to a non-standard format
// like Docker). But this only works if we _test_ it.
func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
oci1, err := ioutil.TempDir("", "oci-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(oci1)
oci2, err := ioutil.TempDir("", "oci-2")
c.Assert(err, check.IsNil)
defer os.RemoveAll(oci2)
// Docker -> OCI
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
// OCI -> Docker
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
// Docker -> OCI
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", ourRegistry+"original/busybox:oci_copy", "oci:"+oci2+":latest")
// OCI -> Docker
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci2+":latest", ourRegistry+"original/busybox:oci_copy2")
// TODO: Add some more tags to output to and check those work properly.
// First, make sure the OCI blobs are the same. This should _always_ be true.
out := combinedOutputOfCommand(c, "diff", "-urN", oci1+"/blobs", oci2+"/blobs")
c.Assert(out, check.Equals, "")
// For some silly reason we pass a logger to the OCI library here...
logger := log.New(os.Stderr, "", 0)
// Verify using the upstream OCI image validator, this should catch most
// non-compliance errors. DO NOT REMOVE THIS TEST UNLESS IT'S ABSOLUTELY
// NECESSARY.
err = image.ValidateLayout(oci1, nil, logger)
c.Assert(err, check.IsNil)
err = image.ValidateLayout(oci2, nil, logger)
c.Assert(err, check.IsNil)
// Now verify that everything is identical. Currently this is true, but
// because we recompute the manifests on-the-fly this doesn't necessarily
// always have to be true (but if this breaks in the future __PLEASE__ make
// sure that the breakage actually makes sense before removing this check).
out = combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
c.Assert(out, check.Equals, "")
}
// --sign-by and --policy copy, primarily using atomic:
func (s *CopySuite) TestCopySignatures(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}
dir, err := ioutil.TempDir("", "signatures-dest")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir)
dirDest := "dir:" + dir
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)
// type: reject
assertSkopeoFails(c, ".*Source image rejected: Running image docker://busybox:latest is rejected by policy.*",
"--policy", policy, "copy", "docker://busybox:latest", dirDest)
// type: insecureAcceptAnything
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://openshift/hello-openshift", dirDest)
// type: signedBy
// Sign the images
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "[email protected]", "docker://busybox:1.26", "atomic:localhost:5006/myns/personal:personal")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "[email protected]", "docker://busybox:1.26.1", "atomic:localhost:5006/myns/official:official")
// Verify that we can pull them
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest)
// Verify that mis-signed images are rejected
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/personal:personal", "atomic:localhost:5006/myns/official:attack")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/personal:attack")
assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:attack", dirDest)
assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:attack", dirDest)
// Verify that signed identity is verified.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:test1")
assertSkopeoFails(c, ".*Source image rejected: Signature for identity localhost:5006/myns/official:official is not accepted.*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:test1", dirDest)
// signedIdentity works
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/naming:naming")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/naming:naming", dirDest)
// Verify that cosigning requirements are enforced
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
"--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "[email protected]", "atomic:localhost:5006/myns/official:official", "atomic:localhost:5006/myns/cosigned:cosigned")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/cosigned:cosigned", dirDest)
}
// --policy copy for dir: sources
func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}
topDir, err := ioutil.TempDir("", "dir-signatures-top")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
topDirDest := "dir:" + topDir
for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} {
err := os.MkdirAll(topDir+suffix, 0755)
c.Assert(err, check.IsNil)
}
// Note the "/@dirpath@": The value starts with a slash so that it is not rejected in other tests which do not replace it,
// but we must ensure that the result is a canonical path, not something starting with a "//".
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome, "/@dirpath@": topDir + "/restricted"})
defer os.Remove(policy)
// Get some images.
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
	// Sign the images. By copying from a topDirDest/dirN, also test that non-/restricted paths
// use the dir:"" default of insecureAcceptAnything.
// (For signing, we must push to atomic: to get a Docker identity to use in the signature.)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "[email protected]", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "[email protected]", topDirDest+"/dir2", "atomic:localhost:5000/myns/official:dirstaging")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging", topDirDest+"/restricted/personal")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/official:dirstaging", topDirDest+"/restricted/official")
// type: signedBy, with a signedIdentity override (necessary because dir: identities can't be signed)
// Verify that correct images are accepted
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", topDirDest+"/restricted/official", topDirDest+"/dest")
// ... and that mis-signed images are rejected.
assertSkopeoFails(c, ".*Source image rejected: Invalid GPG signature.*",
"--policy", policy, "copy", topDirDest+"/restricted/personal", topDirDest+"/dest")
// Verify that the signed identity is verified.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "[email protected]", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging2")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/personal:dirstaging2", topDirDest+"/restricted/badidentity")
assertSkopeoFails(c, ".*Source image rejected: .*Signature for identity localhost:5000/myns/personal:dirstaging2 is not accepted.*",
"--policy", policy, "copy", topDirDest+"/restricted/badidentity", topDirDest+"/dest")
}
// Compression during copy
func (s *CopySuite) TestCopyCompression(c *check.C) {
const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710.tar"
topDir, err := ioutil.TempDir("", "compression-top")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
for i, t := range []struct{ fixture, remote string }{
{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"},
{"uncompressed-image-s2", "docker://" + v2DockerRegistryURL + "/compression/compression:s2"},
{"uncompressed-image-s1", "atomic:localhost:5000/myns/compression:s1"},
{"uncompressed-image-s2", "atomic:localhost:5000/myns/compression:s2"},
} {
dir := filepath.Join(topDir, fmt.Sprintf("case%d", i))
err := os.MkdirAll(dir, 0755)
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "dir:fixtures/"+t.fixture, t.remote)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", t.remote, "dir:"+dir)
// The original directory contained an uncompressed file, the copy after pushing and pulling doesn't (we use a different name for the compressed file).
_, err = os.Lstat(filepath.Join("fixtures", t.fixture, uncompresssedLayerFile))
c.Assert(err, check.IsNil)
_, err = os.Lstat(filepath.Join(dir, uncompresssedLayerFile))
c.Assert(err, check.NotNil)
c.Assert(os.IsNotExist(err), check.Equals, true)
// All pulled layers are smaller than the uncompressed size of uncompresssedLayerFile. (Note that this includes the manifest in s2, but that works out OK).
dirf, err := os.Open(dir)
c.Assert(err, check.IsNil)
fis, err := dirf.Readdir(-1)
c.Assert(err, check.IsNil)
for _, fi := range fis {
if strings.HasSuffix(fi.Name(), ".tar") {
c.Assert(fi.Size() < 2048, check.Equals, true)
}
}
}
}
func findRegularFiles(c *check.C, root string) []string {
result := []string{}
err := filepath.Walk(root, filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().IsRegular() {
result = append(result, path)
}
return nil
}))
c.Assert(err, check.IsNil)
return result
}
// --sign-by and policy use for docker: with sigstore
func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
copyDest := filepath.Join(tmpDir, "dest")
err = os.Mkdir(copyDest, 0755)
c.Assert(err, check.IsNil)
dirDest := "dir:" + copyDest
plainSigstore := filepath.Join(tmpDir, "sigstore")
splitSigstoreStaging := filepath.Join(tmpDir, "sigstore-staging")
splitSigstoreReadServerHandler := http.NotFoundHandler()
splitSigstoreReadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
splitSigstoreReadServerHandler.ServeHTTP(w, r)
}))
defer splitSigstoreReadServer.Close()
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)
registriesDir := filepath.Join(tmpDir, "registries.d")
err = os.Mkdir(registriesDir, 0755)
c.Assert(err, check.IsNil)
registriesFile := fileFromFixture(c, "fixtures/registries.yaml",
map[string]string{"@sigstore@": plainSigstore, "@split-staging@": splitSigstoreStaging, "@split-read@": splitSigstoreReadServer.URL})
err = os.Symlink(registriesFile, filepath.Join(registriesDir, "registries.yaml"))
c.Assert(err, check.IsNil)
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", ourRegistry+"original/busybox")
// Pulling an unsigned image fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)
// Signing with sigstore defined succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "[email protected]", ourRegistry+"original/busybox", ourRegistry+"signed/busybox")
// a signature file has been created,
foundFiles := findRegularFiles(c, plainSigstore)
c.Assert(foundFiles, check.HasLen, 1)
// and pulling a signed image succeeds.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"signed/busybox", dirDest)
// Deleting the image succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox")
// and the signature file has been deleted (but we leave the directories around).
foundFiles = findRegularFiles(c, plainSigstore)
c.Assert(foundFiles, check.HasLen, 0)
// Signing with a read/write sigstore split succeeds,
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "--sign-by", "[email protected]", ourRegistry+"original/busybox", ourRegistry+"public/busybox")
// and a signature file has been created.
foundFiles = findRegularFiles(c, splitSigstoreStaging)
c.Assert(foundFiles, check.HasLen, 1)
// Pulling the image fails because the read sigstore URL has not been populated:
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
// Pulling the image succeeds after the read sigstore URL is available:
splitSigstoreReadServerHandler = http.FileServer(http.Dir(splitSigstoreStaging))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
}
// atomic: and docker: X-Registry-Supports-Signatures works and interoperates
func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
c.Assert(err, check.IsNil)
defer mech.Close()
if err := mech.SupportsSigning(); err != nil { // FIXME? Test that the reading/writing works using signatures from fixtures
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}
topDir, err := ioutil.TempDir("", "atomic-extension")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
}
registriesDir := filepath.Join(topDir, "registries.d")
dirDest := "dir:" + topDir
policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
defer os.Remove(policy)
// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
// Pulling an unsigned image using atomic: fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy,
"copy", "atomic:localhost:5000/myns/extension:unsigned", dirDest+"/dirAA")
// The same when pulling using docker:
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
"copy", "docker://localhost:5000/myns/extension:unsigned", dirDest+"/dirAD")
// Sign the image using atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false",
"copy", "--sign-by", "[email protected]", "atomic:localhost:5000/myns/extension:unsigned", "atomic:localhost:5000/myns/extension:atomic")
// Pulling the image using atomic: now succeeds.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy,
"copy", "atomic:localhost:5000/myns/extension:atomic", dirDest+"/dirAA")
// The same when pulling using docker:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
"copy", "docker://localhost:5000/myns/extension:atomic", dirDest+"/dirAD")
// Both access methods result in the same data.
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirAA"), filepath.Join(topDir, "dirAD"))
// Get another image (different so that they don't share signatures, and sign it using docker://)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
"copy", "--sign-by", "[email protected]", "docker://estesp/busybox:ppc64le", "atomic:localhost:5000/myns/extension:extension")
c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
// Pulling the image using atomic: succeeds.
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
"copy", "atomic:localhost:5000/myns/extension:extension", dirDest+"/dirDA")
// The same when pulling using docker:
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
"copy", "docker://localhost:5000/myns/extension:extension", dirDest+"/dirDD")
// Both access methods result in the same data.
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
}
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
dir1, err := ioutil.TempDir("", "copy-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1)
}
func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}
func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
}
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWOTLSVerifyFalse(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
// dir:test isn't created beforehand just because we already know this could
// just fail when evaluating the src
assertSkopeoFails(c, ".*server gave HTTP response to HTTPS client.*",
"copy", ourRegistry+"foobar", "dir:test")
}
func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
// Test conversion / schema autodetection both for the OpenShift embedded registry…
s.testCopySchemaConversionRegistries(c, "docker://localhost:5005/myns/schema1", "docker://localhost:5006/myns/schema2")
// … and for various docker/distribution registry versions.
s.testCopySchemaConversionRegistries(c, "docker://"+v2s1DockerRegistryURL+"/schema1", "docker://"+v2DockerRegistryURL+"/schema2")
}
func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
topDir, err := ioutil.TempDir("", "manifest-conversion")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
srcDir := filepath.Join(topDir, "source")
destDir1 := filepath.Join(topDir, "dest1")
destDir2 := filepath.Join(topDir, "dest2")
// oci to v2s1 and vice-versa not supported yet
// get v2s2 manifest type
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+srcDir)
verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType)
// convert from v2s2 to oci
assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1)
verifyManifestMIMEType(c, destDir1, imgspecv1.MediaTypeImageManifest)
// convert from oci to v2s2
assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2)
verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType)
// convert from v2s2 to v2s1
assertSkopeoSucceeds(c, "", "copy", "--format=v2s1", "dir:"+srcDir, "dir:"+destDir1)
verifyManifestMIMEType(c, destDir1, manifest.DockerV2Schema1SignedMediaType)
// convert from v2s1 to v2s2
assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2)
verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType)
}
func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
topDir, err := ioutil.TempDir("", "schema-conversion")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
for _, subdir := range []string{"input1", "input2", "dest2"} {
err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
c.Assert(err, check.IsNil)
}
input1Dir := filepath.Join(topDir, "input1")
input2Dir := filepath.Join(topDir, "input2")
destDir := filepath.Join(topDir, "dest2")
// Ensure we are working with a schema2 image.
// dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can.
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+input2Dir)
verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType)
// 2→2 (the "f2t2" in tag means "from 2 to 2")
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f2t2", "dir:"+destDir)
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema2MediaType)
// 2→1; we will use the result as a schema1 image for further tests.
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema1Registry+":f2t1")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f2t1", "dir:"+input1Dir)
verifyManifestMIMEType(c, input1Dir, manifest.DockerV2Schema1SignedMediaType)
// 1→1
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema1Registry+":f1t1")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema1Registry+":f1t1", "dir:"+destDir)
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
// 1→2: image stays unmodified schema1
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input1Dir, schema2Registry+":f1t2")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", schema2Registry+":f1t2", "dir:"+destDir)
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
}
// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
}
| [
"\"SKOPEO_CONTAINER_TESTS\""
]
| []
| [
"SKOPEO_CONTAINER_TESTS"
]
| [] | ["SKOPEO_CONTAINER_TESTS"] | go | 1 | 0 | |
Project/wsgi.py | """
WSGI config for Project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Project.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/handler_custom/main.go | package main
import (
"fmt"
"os"
"github.com/mymmrac/telego"
th "github.com/mymmrac/telego/telegohandler"
)
func main() {
botToken := os.Getenv("TOKEN")
bot, err := telego.NewBot(botToken, telego.WithDefaultLogger(true, true))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Get updates channel
updates, _ := bot.UpdatesViaLongPulling(nil)
defer bot.StopLongPulling()
// Create bot handler and specify from where to get updates
bh, _ := th.NewBotHandler(bot, updates)
// Register handler with union predicate and not predicate
bh.Handle(func(bot *telego.Bot, update telego.Update) {
		fmt.Println("Update with message text `Hmm?`, or any update without a message.")
}, th.Union(
th.Not(th.AnyMessage()), // Matches to any not message update
th.TextEqual("Hmm?"), // Matches to message update with text `Hmm?`
))
// Register handler with message predicate and custom predicate
bh.Handle(func(bot *telego.Bot, update telego.Update) {
		fmt.Println("Update with a message whose text is longer than 7 chars.")
},
th.AnyMessage(), // Matches to any message update
		func(update telego.Update) bool { // Matches to message update with text longer than 7
return len(update.Message.Text) > 7
},
)
	// Register handler with multiple text-matching predicates
	bh.Handle(func(bot *telego.Bot, update telego.Update) {
		fmt.Println("Update with message text that contains `one`, starts with `two` and ends with `three`")
	}, th.TextContains("one"), th.TextPrefix("two"), th.TextSuffix("three"))
// Start handling updates
bh.Start()
// Stop handling updates
defer bh.Stop()
}
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | go | 1 | 0 | |
ftp_server/pyftpdlib/test/test_authorizers.py | #!/usr/bin/env python
# Copyright (C) 2007 Giampaolo Rodola' <[email protected]>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import os
import random
import string
import sys
import tempfile
import warnings
from pyftpdlib._compat import getcwdu
from pyftpdlib._compat import unicode
from pyftpdlib.authorizers import AuthenticationFailed
from pyftpdlib.authorizers import AuthorizerError
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.test import HOME
from pyftpdlib.test import PASSWD
from pyftpdlib.test import POSIX
from pyftpdlib.test import TESTFN
from pyftpdlib.test import touch
from pyftpdlib.test import unittest
from pyftpdlib.test import USER
from pyftpdlib.test import VERBOSITY
from pyftpdlib.test import WINDOWS
if POSIX:
import pwd
try:
from pyftpdlib.authorizers import UnixAuthorizer
except ImportError:
UnixAuthorizer = None
else:
UnixAuthorizer = None
if WINDOWS:
from pywintypes import error as Win32ExtError
from pyftpdlib.authorizers import WindowsAuthorizer
else:
WindowsAuthorizer = None
class TestDummyAuthorizer(unittest.TestCase):
"""Tests for DummyAuthorizer class."""
# temporarily change warnings to exceptions for the purposes of testing
def setUp(self):
self.tempdir = tempfile.mkdtemp(dir=HOME)
self.subtempdir = tempfile.mkdtemp(
dir=os.path.join(HOME, self.tempdir))
self.tempfile = touch(os.path.join(self.tempdir, TESTFN))
self.subtempfile = touch(os.path.join(self.subtempdir, TESTFN))
warnings.filterwarnings("error")
def tearDown(self):
os.remove(self.tempfile)
os.remove(self.subtempfile)
os.rmdir(self.subtempdir)
os.rmdir(self.tempdir)
warnings.resetwarnings()
def test_common_methods(self):
auth = DummyAuthorizer()
# create user
auth.add_user(USER, PASSWD, HOME)
auth.add_anonymous(HOME)
# check credentials
auth.validate_authentication(USER, PASSWD, None)
self.assertRaises(AuthenticationFailed,
auth.validate_authentication, USER, 'wrongpwd', None)
auth.validate_authentication('anonymous', 'foo', None)
auth.validate_authentication('anonymous', '', None) # empty passwd
# remove them
auth.remove_user(USER)
auth.remove_user('anonymous')
# raise exc if user does not exists
self.assertRaises(KeyError, auth.remove_user, USER)
# raise exc if path does not exist
self.assertRaisesRegex(ValueError,
'no such directory',
auth.add_user, USER, PASSWD, '?:\\')
self.assertRaisesRegex(ValueError,
'no such directory',
auth.add_anonymous, '?:\\')
# raise exc if user already exists
auth.add_user(USER, PASSWD, HOME)
auth.add_anonymous(HOME)
self.assertRaisesRegex(ValueError,
'user %r already exists' % USER,
auth.add_user, USER, PASSWD, HOME)
self.assertRaisesRegex(ValueError,
"user 'anonymous' already exists",
auth.add_anonymous, HOME)
auth.remove_user(USER)
auth.remove_user('anonymous')
# raise on wrong permission
self.assertRaisesRegex(ValueError,
"no such permission",
auth.add_user, USER, PASSWD, HOME, perm='?')
self.assertRaisesRegex(ValueError,
"no such permission",
auth.add_anonymous, HOME, perm='?')
# expect warning on write permissions assigned to anonymous user
for x in "adfmw":
self.assertRaisesRegex(
RuntimeWarning,
"write permissions assigned to anonymous user.",
auth.add_anonymous, HOME, perm=x)
def test_override_perm_interface(self):
auth = DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
# raise exc if user does not exists
self.assertRaises(KeyError, auth.override_perm, USER + 'w',
HOME, 'elr')
# raise exc if path does not exist or it's not a directory
self.assertRaisesRegex(ValueError,
'no such directory',
auth.override_perm, USER, '?:\\', 'elr')
self.assertRaisesRegex(ValueError,
'no such directory',
auth.override_perm, USER, self.tempfile, 'elr')
# raise on wrong permission
self.assertRaisesRegex(ValueError,
"no such permission", auth.override_perm,
USER, HOME, perm='?')
# expect warning on write permissions assigned to anonymous user
auth.add_anonymous(HOME)
for p in "adfmw":
self.assertRaisesRegex(
RuntimeWarning,
"write permissions assigned to anonymous user.",
auth.override_perm, 'anonymous', HOME, p)
# raise on attempt to override home directory permissions
self.assertRaisesRegex(ValueError,
"can't override home directory permissions",
auth.override_perm, USER, HOME, perm='w')
# raise on attempt to override a path escaping home directory
if os.path.dirname(HOME) != HOME:
self.assertRaisesRegex(ValueError,
"path escapes user home directory",
auth.override_perm, USER,
os.path.dirname(HOME), perm='w')
# try to re-set an overridden permission
auth.override_perm(USER, self.tempdir, perm='w')
auth.override_perm(USER, self.tempdir, perm='wr')
def test_override_perm_recursive_paths(self):
auth = DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), False)
auth.override_perm(USER, self.tempdir, perm='w', recursive=True)
self.assertEqual(auth.has_perm(USER, 'w', HOME), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.tempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', HOME + '@'), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir + '@'), False)
path = os.path.join(self.tempdir + '@',
os.path.basename(self.tempfile))
self.assertEqual(auth.has_perm(USER, 'w', path), False)
# test case-sensitiveness
if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
self.assertTrue(auth.has_perm(USER, 'w', self.tempdir.upper()))
def test_override_perm_not_recursive_paths(self):
auth = DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), False)
auth.override_perm(USER, self.tempdir, perm='w')
self.assertEqual(auth.has_perm(USER, 'w', HOME), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.tempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempdir), False)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempfile), False)
self.assertEqual(auth.has_perm(USER, 'w', HOME + '@'), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir + '@'), False)
path = os.path.join(self.tempdir + '@',
os.path.basename(self.tempfile))
self.assertEqual(auth.has_perm(USER, 'w', path), False)
# test case-sensitiveness
if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir.upper()),
True)
class _SharedAuthorizerTests(object):
"""Tests valid for both UnixAuthorizer and WindowsAuthorizer for
those parts which share the same API.
"""
authorizer_class = None
# --- utils
def get_users(self):
return self.authorizer_class._get_system_users()
def get_current_user(self):
if POSIX:
return pwd.getpwuid(os.getuid()).pw_name
else:
return os.environ['USERNAME']
def get_current_user_homedir(self):
if POSIX:
return pwd.getpwuid(os.getuid()).pw_dir
else:
return os.environ['USERPROFILE']
def get_nonexistent_user(self):
# return a user which does not exist on the system
users = self.get_users()
letters = string.ascii_lowercase
while True:
user = ''.join([random.choice(letters) for i in range(10)])
if user not in users:
return user
def assertRaisesWithMsg(self, excClass, msg, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except excClass as err:
if str(err) == msg:
return
raise self.failureException("%s != %s" % (str(err), msg))
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
# --- /utils
def test_get_home_dir(self):
auth = self.authorizer_class()
home = auth.get_home_dir(self.get_current_user())
self.assertTrue(isinstance(home, unicode))
nonexistent_user = self.get_nonexistent_user()
self.assertTrue(os.path.isdir(home))
if auth.has_user('nobody'):
home = auth.get_home_dir('nobody')
self.assertRaises(AuthorizerError,
auth.get_home_dir, nonexistent_user)
def test_has_user(self):
auth = self.authorizer_class()
current_user = self.get_current_user()
nonexistent_user = self.get_nonexistent_user()
self.assertTrue(auth.has_user(current_user))
self.assertFalse(auth.has_user(nonexistent_user))
auth = self.authorizer_class(rejected_users=[current_user])
self.assertFalse(auth.has_user(current_user))
def test_validate_authentication(self):
# can't test for actual success in case of valid authentication
# here as we don't have the user password
if self.authorizer_class.__name__ == 'UnixAuthorizer':
auth = self.authorizer_class(require_valid_shell=False)
else:
auth = self.authorizer_class()
current_user = self.get_current_user()
nonexistent_user = self.get_nonexistent_user()
self.assertRaises(
AuthenticationFailed,
auth.validate_authentication, current_user, 'wrongpasswd', None)
self.assertRaises(
AuthenticationFailed,
auth.validate_authentication, nonexistent_user, 'bar', None)
def test_impersonate_user(self):
auth = self.authorizer_class()
nonexistent_user = self.get_nonexistent_user()
try:
if self.authorizer_class.__name__ == 'UnixAuthorizer':
auth.impersonate_user(self.get_current_user(), '')
self.assertRaises(
AuthorizerError,
auth.impersonate_user, nonexistent_user, 'pwd')
else:
self.assertRaises(
Win32ExtError,
auth.impersonate_user, nonexistent_user, 'pwd')
self.assertRaises(
Win32ExtError,
auth.impersonate_user, self.get_current_user(), '')
finally:
auth.terminate_impersonation('')
def test_terminate_impersonation(self):
auth = self.authorizer_class()
auth.terminate_impersonation('')
auth.terminate_impersonation('')
def test_get_perms(self):
auth = self.authorizer_class(global_perm='elr')
self.assertTrue('r' in auth.get_perms(self.get_current_user()))
self.assertFalse('w' in auth.get_perms(self.get_current_user()))
def test_has_perm(self):
auth = self.authorizer_class(global_perm='elr')
self.assertTrue(auth.has_perm(self.get_current_user(), 'r'))
self.assertFalse(auth.has_perm(self.get_current_user(), 'w'))
def test_messages(self):
auth = self.authorizer_class(msg_login="login", msg_quit="quit")
self.assertTrue(auth.get_msg_login, "login")
self.assertTrue(auth.get_msg_quit, "quit")
def test_error_options(self):
wrong_user = self.get_nonexistent_user()
self.assertRaisesWithMsg(
AuthorizerError,
"rejected_users and allowed_users options are mutually exclusive",
self.authorizer_class, allowed_users=['foo'],
rejected_users=['bar'])
self.assertRaisesWithMsg(
AuthorizerError,
'invalid username "anonymous"',
self.authorizer_class, allowed_users=['anonymous'])
self.assertRaisesWithMsg(
AuthorizerError,
'invalid username "anonymous"',
self.authorizer_class, rejected_users=['anonymous'])
self.assertRaisesWithMsg(
AuthorizerError,
'unknown user %s' % wrong_user,
self.authorizer_class, allowed_users=[wrong_user])
self.assertRaisesWithMsg(AuthorizerError,
'unknown user %s' % wrong_user,
self.authorizer_class,
rejected_users=[wrong_user])
def test_override_user_password(self):
auth = self.authorizer_class()
user = self.get_current_user()
auth.override_user(user, password='foo')
auth.validate_authentication(user, 'foo', None)
        self.assertRaises(AuthenticationFailed, auth.validate_authentication,
                          user, 'bar', None)
# make sure other settings keep using default values
self.assertEqual(auth.get_home_dir(user),
self.get_current_user_homedir())
self.assertEqual(auth.get_perms(user), "elradfmw")
self.assertEqual(auth.get_msg_login(user), "Login successful.")
self.assertEqual(auth.get_msg_quit(user), "Goodbye.")
def test_override_user_homedir(self):
auth = self.authorizer_class()
user = self.get_current_user()
dir = os.path.dirname(getcwdu())
auth.override_user(user, homedir=dir)
self.assertEqual(auth.get_home_dir(user), dir)
# make sure other settings keep using default values
# self.assertEqual(auth.get_home_dir(user),
# self.get_current_user_homedir())
self.assertEqual(auth.get_perms(user), "elradfmw")
self.assertEqual(auth.get_msg_login(user), "Login successful.")
self.assertEqual(auth.get_msg_quit(user), "Goodbye.")
def test_override_user_perm(self):
auth = self.authorizer_class()
user = self.get_current_user()
auth.override_user(user, perm="elr")
self.assertEqual(auth.get_perms(user), "elr")
# make sure other settings keep using default values
self.assertEqual(auth.get_home_dir(user),
self.get_current_user_homedir())
# self.assertEqual(auth.get_perms(user), "elradfmw")
self.assertEqual(auth.get_msg_login(user), "Login successful.")
self.assertEqual(auth.get_msg_quit(user), "Goodbye.")
def test_override_user_msg_login_quit(self):
auth = self.authorizer_class()
user = self.get_current_user()
auth.override_user(user, msg_login="foo", msg_quit="bar")
self.assertEqual(auth.get_msg_login(user), "foo")
self.assertEqual(auth.get_msg_quit(user), "bar")
# make sure other settings keep using default values
self.assertEqual(auth.get_home_dir(user),
self.get_current_user_homedir())
self.assertEqual(auth.get_perms(user), "elradfmw")
# self.assertEqual(auth.get_msg_login(user), "Login successful.")
# self.assertEqual(auth.get_msg_quit(user), "Goodbye.")
def test_override_user_errors(self):
if self.authorizer_class.__name__ == 'UnixAuthorizer':
auth = self.authorizer_class(require_valid_shell=False)
else:
auth = self.authorizer_class()
this_user = self.get_current_user()
for x in self.get_users():
if x != this_user:
another_user = x
break
nonexistent_user = self.get_nonexistent_user()
self.assertRaisesWithMsg(
AuthorizerError,
"at least one keyword argument must be specified",
auth.override_user, this_user)
self.assertRaisesWithMsg(AuthorizerError,
'no such user %s' % nonexistent_user,
auth.override_user, nonexistent_user,
perm='r')
if self.authorizer_class.__name__ == 'UnixAuthorizer':
auth = self.authorizer_class(allowed_users=[this_user],
require_valid_shell=False)
else:
auth = self.authorizer_class(allowed_users=[this_user])
auth.override_user(this_user, perm='r')
self.assertRaisesWithMsg(AuthorizerError,
'%s is not an allowed user' % another_user,
auth.override_user, another_user, perm='r')
if self.authorizer_class.__name__ == 'UnixAuthorizer':
auth = self.authorizer_class(rejected_users=[this_user],
require_valid_shell=False)
else:
auth = self.authorizer_class(rejected_users=[this_user])
auth.override_user(another_user, perm='r')
self.assertRaisesWithMsg(AuthorizerError,
'%s is not an allowed user' % this_user,
auth.override_user, this_user, perm='r')
self.assertRaisesWithMsg(AuthorizerError,
"can't assign password to anonymous user",
auth.override_user, "anonymous",
password='foo')
# =====================================================================
# --- UNIX authorizer
# =====================================================================
@unittest.skipUnless(POSIX, "UNIX only")
@unittest.skipUnless(UnixAuthorizer is not None,
"UnixAuthorizer class not available")
class TestUnixAuthorizer(_SharedAuthorizerTests, unittest.TestCase):
"""Unix authorizer specific tests."""
authorizer_class = UnixAuthorizer
def setUp(self):
try:
UnixAuthorizer()
except AuthorizerError: # not root
self.skipTest("need root access")
def test_get_perms_anonymous(self):
auth = UnixAuthorizer(
global_perm='elr', anonymous_user=self.get_current_user())
self.assertTrue('e' in auth.get_perms('anonymous'))
self.assertFalse('w' in auth.get_perms('anonymous'))
warnings.filterwarnings("ignore")
auth.override_user('anonymous', perm='w')
warnings.resetwarnings()
self.assertTrue('w' in auth.get_perms('anonymous'))
def test_has_perm_anonymous(self):
auth = UnixAuthorizer(
global_perm='elr', anonymous_user=self.get_current_user())
self.assertTrue(auth.has_perm(self.get_current_user(), 'r'))
self.assertFalse(auth.has_perm(self.get_current_user(), 'w'))
self.assertTrue(auth.has_perm('anonymous', 'e'))
self.assertFalse(auth.has_perm('anonymous', 'w'))
warnings.filterwarnings("ignore")
auth.override_user('anonymous', perm='w')
warnings.resetwarnings()
self.assertTrue(auth.has_perm('anonymous', 'w'))
def test_validate_authentication(self):
# we can only test for invalid credentials
auth = UnixAuthorizer(require_valid_shell=False)
self.assertRaises(AuthenticationFailed,
auth.validate_authentication, '?!foo', '?!foo', None)
auth = UnixAuthorizer(require_valid_shell=True)
self.assertRaises(AuthenticationFailed,
auth.validate_authentication, '?!foo', '?!foo', None)
def test_validate_authentication_anonymous(self):
current_user = self.get_current_user()
auth = UnixAuthorizer(anonymous_user=current_user,
require_valid_shell=False)
self.assertRaises(AuthenticationFailed,
auth.validate_authentication, 'foo', 'passwd', None)
self.assertRaises(
AuthenticationFailed,
auth.validate_authentication, current_user, 'passwd', None)
auth.validate_authentication('anonymous', 'passwd', None)
def test_require_valid_shell(self):
def get_fake_shell_user():
for user in self.get_users():
shell = pwd.getpwnam(user).pw_shell
                # On Linux a fake shell is usually /bin/false, on
                # FreeBSD /usr/sbin/nologin; for other UNIX
                # variants this test may need to be adjusted.
if '/false' in shell or '/nologin' in shell:
return user
self.fail("no user found")
user = get_fake_shell_user()
self.assertRaisesWithMsg(
AuthorizerError,
"user %s has not a valid shell_sample" % user,
UnixAuthorizer, allowed_users=[user])
# commented as it first fails for invalid home
# self.assertRaisesWithMsg(
# ValueError,
# "user %s has not a valid shell_sample" % user,
# UnixAuthorizer, anonymous_user=user)
auth = UnixAuthorizer()
self.assertTrue(auth._has_valid_shell(self.get_current_user()))
self.assertFalse(auth._has_valid_shell(user))
self.assertRaisesWithMsg(AuthorizerError,
"User %s doesn't have a valid shell_sample." % user,
auth.override_user, user, perm='r')
def test_not_root(self):
# UnixAuthorizer is supposed to work only as super user
auth = self.authorizer_class()
try:
auth.impersonate_user('nobody', '')
self.assertRaisesWithMsg(AuthorizerError,
"super user privileges are required",
UnixAuthorizer)
finally:
auth.terminate_impersonation('nobody')
# =====================================================================
# --- Windows authorizer
# =====================================================================
@unittest.skipUnless(WINDOWS, "Windows only")
class TestWindowsAuthorizer(_SharedAuthorizerTests, unittest.TestCase):
"""Windows authorizer specific tests."""
authorizer_class = WindowsAuthorizer
def test_wrong_anonymous_credentials(self):
user = self.get_current_user()
self.assertRaises(Win32ExtError, self.authorizer_class,
anonymous_user=user,
anonymous_password='$|1wrongpasswd')
if __name__ == '__main__':
unittest.main(verbosity=VERBOSITY)
| []
| []
| ["USERNAME", "USERPROFILE"]
| [] | ["USERNAME", "USERPROFILE"] | python | 2 | 0 | |
crimemanagement/asgi.py | """
ASGI config for crimemanagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crimemanagement.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
volttron/platform/control.py | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from __future__ import absolute_import, print_function
import argparse
import collections
import json
import logging
import logging.handlers
import os
import re
import shutil
import sys
import tempfile
import traceback
import uuid
import hashlib
import tarfile
import subprocess
from datetime import timedelta
import requests
import gevent
import gevent.event
from volttron.platform.vip.agent.subsystems.query import Query
from volttron.platform import get_home, get_address
from volttron.platform.messaging.health import Status, STATUS_BAD
from volttron.platform.agent import utils
from volttron.platform.agent.known_identities import CONTROL_CONNECTION, \
CONFIGURATION_STORE
from volttron.platform.vip.agent import Agent as BaseAgent, Core, RPC
from volttron.platform import aip as aipmod
from volttron.platform import config
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.auth import AuthEntry, AuthFile, AuthException
from volttron.platform.keystore import KeyStore, KnownHostsStore
from volttron.platform.vip.socket import Message
from volttron.utils.prompt import prompt_response, y, n, y_or_n
from .vip.agent.errors import VIPError
from volttron.utils.rmq_mgmt import RabbitMQMgmt
from volttron.utils.rmq_setup import check_rabbit_status
from volttron.utils.rmq_config_params import RMQConfig
from requests.packages.urllib3.connection import (ConnectionError,
NewConnectionError)
from volttron.platform.scheduling import periodic
try:
import volttron.restricted
except ImportError:
HAVE_RESTRICTED = False
else:
from volttron.restricted import cgroups
HAVE_RESTRICTED = True
_stdout = sys.stdout
_stderr = sys.stderr
_log = logging.getLogger(os.path.basename(sys.argv[0])
if __name__ == '__main__' else __name__)
message_bus = utils.get_messagebus()
rmq_mgmt = None
CHUNK_SIZE = 4096
class ControlService(BaseAgent):
def __init__(self, aip, agent_monitor_frequency, *args, **kwargs):
tracker = kwargs.pop('tracker', None)
kwargs["enable_store"] = False
super(ControlService, self).__init__(*args, **kwargs)
self._aip = aip
self._tracker = tracker
self.crashed_agents = {}
self.agent_monitor_frequency = int(agent_monitor_frequency)
@Core.receiver('onsetup')
def _setup(self, sender, **kwargs):
if not self._tracker:
return
self.vip.rpc.export(lambda: self._tracker.enabled, 'stats.enabled')
self.vip.rpc.export(self._tracker.enable, 'stats.enable')
self.vip.rpc.export(self._tracker.disable, 'stats.disable')
self.vip.rpc.export(lambda: self._tracker.stats, 'stats.get')
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
_log.debug(" agent monitor frequency is... {}".format(
self.agent_monitor_frequency))
self.core.schedule(periodic(self.agent_monitor_frequency),
self._monitor_agents)
def _monitor_agents(self):
"""
Periodically look for agents that crashed and schedule a restart
attempt. Attempts at most 5 times with increasing interval
between attempts. Sends alert if attempts fail.
"""
# Get status for agents that have been started at least once.
stats = self._aip.status_agents()
for (uid, name, (pid, stat)) in stats:
if stat:
# stat=0 means stopped and stat=None means running
# will always have pid(current/crashed/stopped)
attempt = self.crashed_agents.get(uid, -1) + 1
if attempt < 5:
self.crashed_agents[uid] = attempt
next_restart = utils.get_aware_utc_now() + timedelta(
minutes=attempt*5)
_log.debug("{} stopped unexpectedly. Will attempt to "
"restart at {}".format(name, next_restart))
self.core.schedule(next_restart,
self._restart_agent,
uid, name)
else:
self.send_alert(uid, name)
self.crashed_agents.pop(uid)
def _restart_agent(self, agent_id, agent_name):
"""
Checks if a given agent has crashed. If so attempts to restart it.
If successful removes the agent id from list of crashed agents
:param agent_id:
:param agent_name:
:return:
"""
(id, stat) = self._aip.agent_status(agent_id)
if stat:
# if there is still some error status... attempt restart
# call self.stop to inform router but call aip start to get
# status back
self.stop_agent(agent_id)
(id,stat) = self._aip.start_agent(agent_id)
if stat is None:
# start successful
self.crashed_agents.pop(agent_id)
_log.info("Successfully restarted agent {}".format(agent_name))
else:
_log.info("Restart of {} failed".format(agent_name))
def send_alert(self, agent_id, agent_name):
"""Send an alert for the group, summarizing missing topics.
:param unseen_topics: List of topics that were expected but not received
:type unseen_topics: list
"""
alert_key = "Agent {}({}) stopped unexpectedly".format(agent_name,
agent_id)
context = "Agent {}({}) stopped unexpectedly. Attempts to " \
"restart failed".format(agent_name, agent_id)
status = Status.build(STATUS_BAD, context=context)
self.vip.health.send_alert(alert_key, status)
@RPC.export
def peerlist(self):
return self.vip.peerlist().get(timeout=5)
@RPC.export
def serverkey(self):
q = Query(self.core)
pk = q.query('serverkey').get(timeout=1)
del q
return pk
@RPC.export
def clear_status(self, clear_all=False):
self._aip.clear_status(clear_all)
@RPC.export
def agent_status(self, uuid):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
return self._aip.agent_status(uuid)
@RPC.export
def agent_name(self, uuid):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
return self._aip.agent_name(uuid)
@RPC.export
def agent_version(self, uuid):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
return self._aip.agent_version(uuid)
@RPC.export
def agent_versions(self):
return self._aip.agent_versions()
@RPC.export
def status_agents(self):
return self._aip.status_agents()
@RPC.export
def start_agent(self, uuid):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
self._aip.start_agent(uuid)
@RPC.export
def stop_agent(self, uuid):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
identity = self.agent_vip_identity(uuid)
self._aip.stop_agent(uuid)
# Send message to router that agent is shutting down
frames = [bytes(identity)]
self.core.connection.send_vip(b'', 'agentstop', args=frames, copy=False)
@RPC.export
def restart_agent(self, uuid):
self.stop_agent(uuid)
self.start_agent(uuid)
@RPC.export
def shutdown(self):
self._aip.shutdown()
@RPC.export
def stop_platform(self):
# XXX: Restrict call as it kills the process
self.core.connection.send_vip(b'', b'quit')
@RPC.export
def list_agents(self):
tag = self._aip.agent_tag
priority = self._aip.agent_priority
return [{'name': name, 'uuid': uuid,
'tag': tag(uuid), 'priority': priority(uuid),
'identity': self.agent_vip_identity(uuid)}
for uuid, name in self._aip.list_agents().iteritems()]
@RPC.export
def tag_agent(self, uuid, tag):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
if not isinstance(tag, (type(None), basestring)):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'tag';"
"got {!r} from identity: {}".format(
                                type(tag).__name__, identity))
return self._aip.tag_agent(uuid, tag)
@RPC.export
def remove_agent(self, uuid, remove_auth=True):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
identity = self.agent_vip_identity(uuid)
frames = [bytes(identity)]
# Send message to router that agent is shutting down
self.core.connection.send_vip(b'', 'agentstop', args=frames, copy=False)
self._aip.remove_agent(uuid, remove_auth=remove_auth)
@RPC.export
def prioritize_agent(self, uuid, priority='50'):
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
if not isinstance(priority, (type(None), basestring)):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string or null for 'priority';"
"got {!r} from identity: {}".format(
                                type(priority).__name__, identity))
self._aip.prioritize_agent(uuid, priority)
@RPC.export
def agent_vip_identity(self, uuid):
""" Lookup the agent's vip identity based upon it's uuid.
@param uuid:
@return:
"""
if not isinstance(uuid, basestring):
identity = bytes(self.vip.rpc.context.vip_message.peer)
raise TypeError("expected a string for 'uuid';"
"got {!r} from identity: {}".format(
type(uuid).__name__, identity))
return self._aip.agent_identity(uuid)
@RPC.export
def get_all_agent_publickeys(self):
"""
RPC method to retrieve the public keys of all of the agents installed
on the VOLTTRON instance.
This method does not differentiate between running and not running
agents.
.. note::
This method will only retrieve a publickey for an installed agents.
It is recommended that dynamic agents use the context of the
containing agent's publickey for connections to external instances.
:return: mapping of identity to agent publickey
:rtype: dict
"""
id_map = self._aip.get_agent_identity_to_uuid_mapping()
retmap = {}
for id, uuid in id_map.items():
retmap[id] = self._aip.get_agent_keystore(uuid).public
return retmap
@RPC.export
def install_agent_local(self, filename, vip_identity=None, publickey=None,
secretkey=None):
return self._aip.install_agent(filename, vip_identity=vip_identity,
publickey=publickey,
secretkey=secretkey)
@RPC.export
def install_agent(self, filename, channel_name, vip_identity=None,
publickey=None, secretkey=None):
"""
Installs an agent on the instance instance.
The installation of an agent through this method involves sending
the binary data of the agent file through a channel. The following
example is the protocol for sending the agent across the wire:
Example Protocol:
.. code-block:: python
# client creates channel to this agent (control)
channel = agent.vip.channel('control', 'channel_name')
# Begin sending data
sha512 = hashlib.sha512()
while True:
request, file_offset, chunk_size = channel.recv_multipart()
                # Control has all of the file. Send hash for it to verify.
if request == b'checksum':
channel.send(hash)
assert request == b'fetch'
# send a chunk of the file
file_offset = int(file_offset)
chunk_size = int(chunk_size)
file.seek(file_offset)
data = file.read(chunk_size)
sha512.update(data)
channel.send(data)
agent_uuid = agent_uuid.get(timeout=10)
# close and delete the channel
channel.close(linger=0)
del channel
:param:string:filename:
The name of the agent packaged file that is being written.
:param:string:channel_name:
The name of the channel that the agent file will be sent on.
:param:string:publickey:
Encoded public key the installed agent will use
:param:string:secretkey:
Encoded secret key the installed agent will use
"""
peer = bytes(self.vip.rpc.context.vip_message.peer)
channel = self.vip.channel(peer, channel_name)
try:
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, os.path.basename(filename))
store = open(path, 'wb')
file_offset = 0
sha512 = hashlib.sha512()
try:
while True:
# request a chunk of the file
channel.send_multipart([
b'fetch',
bytes(file_offset),
bytes(CHUNK_SIZE)
])
# get the requested data
with gevent.Timeout(30):
data = channel.recv()
sha512.update(data)
store.write(data)
size = len(data)
file_offset += size
# let volttron-ctl know that we have everything
if size < CHUNK_SIZE:
channel.send_multipart([b'checksum', b'', b''])
with gevent.Timeout(30):
checksum = channel.recv()
assert checksum == sha512.digest()
break
except AssertionError:
_log.warning("Checksum mismatch on received file")
raise
except gevent.Timeout:
_log.warning("Gevent timeout trying to receive data")
raise
finally:
store.close()
_log.debug('Closing channel on server')
channel.close(linger=0)
del channel
agent_uuid = self._aip.install_agent(path,
vip_identity=vip_identity,
publickey=publickey,
secretkey=secretkey)
return agent_uuid
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
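# Annotation (added, not part of the original module): the client side of the
# chunked transfer that ControlService.install_agent() drives above lives in
# the module-level install_agent() helper further down in this file.  A
# minimal sketch of the handshake, assuming an already connected
# `opts.connection` and a hypothetical `read_chunk()` helper standing in for
# the seek/read logic shown in install_agent(opts):
#
#     channel = opts.connection.server.vip.channel('control', channel_name)
#     result = opts.connection.call_no_get('install_agent', filename,
#                                          channel_name)
#     while True:
#         request, offset, size = channel.recv_multipart()
#         if request == b'checksum':
#             channel.send(sha512.digest())
#             break
#         channel.send(read_chunk(filename, int(offset), int(size)))
#     agent_uuid = result.get(timeout=10)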
def log_to_file(file, level=logging.WARNING,
handler_class=logging.StreamHandler):
'''Direct log output to a file (or something like one).'''
handler = handler_class(file)
handler.setLevel(level)
handler.setFormatter(utils.AgentFormatter(
'%(asctime)s %(composite_name)s %(levelname)s: %(message)s'))
root = logging.getLogger()
root.setLevel(level)
root.addHandler(handler)
Agent = collections.namedtuple('Agent', 'name tag uuid vip_identity')
def _list_agents(aip):
return [Agent(name, aip.agent_tag(uuid), uuid, aip.agent_identity(uuid))
for uuid, name in aip.list_agents().iteritems()]
def escape(pattern):
strings = re.split(r'([*?])', pattern)
if len(strings) == 1:
return re.escape(pattern), False
return ''.join('.*' if s == '*' else '.' if s == '?' else
s if s in [r'\?', r'\*'] else re.escape(s)
for s in strings), True
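# Annotation (added, not part of the original module): escape() converts the
# shell-style wildcards accepted on the command line into a regular
# expression and reports whether any wildcard was present, e.g.
#
#     escape('listener*')  ->  ('listener.*', True)
#     escape('2fc5')       ->  ('2fc5', False)
#
# filter_agents() below anchors the expression with '$' (or, with no
# by_uuid/by_name/by_tag option set, treats a bare pattern as a UUID prefix)
# and matches it against the agent uuid, name or tag.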
def filter_agents(agents, patterns, opts):
by_name, by_tag, by_uuid = opts.by_name, opts.by_tag, opts.by_uuid
for pattern in patterns:
regex, _ = escape(pattern)
result = set()
if not (by_uuid or by_name or by_tag):
reobj = re.compile(regex)
matches = [agent for agent in agents if reobj.match(agent.uuid)]
if len(matches) == 1:
result.update(matches)
else:
reobj = re.compile(regex + '$')
if by_uuid:
result.update(
agent for agent in agents if reobj.match(agent.uuid))
if by_name:
result.update(
agent for agent in agents if reobj.match(agent.name))
if by_tag:
result.update(
agent for agent in agents if reobj.match(agent.tag or ''))
yield pattern, result
def filter_agent(agents, pattern, opts):
return next(filter_agents(agents, [pattern], opts))[1]
def backup_agent_data(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.sep) # os.path.basename(source_dir))
def restore_agent_data_from_tgz(source_file, output_dir):
# Open tarfile
with tarfile.open(mode="r:gz", fileobj=file(source_file)) as tar:
tar.extractall(output_dir)
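# Annotation (added, not part of the original module): backup_agent_data()
# and restore_agent_data_from_tgz() are used as a pair by upgrade_agent()
# below to carry an agent's <uuid>.agent-data directory across a reinstall.
# Rough round trip, with placeholder paths:
#
#     backup_agent_data('/tmp/<uuid>.tar.gz', old_agent_data_dir)
#     # ... remove the old agent and install the new wheel ...
#     restore_agent_data_from_tgz('/tmp/<uuid>.tar.gz', new_agent_data_dir)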
def find_agent_data_dir(opts, agent_uuid):
agent_data_dir = None
for x in os.listdir(opts.aip.agent_dir(agent_uuid)):
if x.endswith("agent-data"):
agent_data_dir = os.path.join(opts.aip.agent_dir(agent_uuid), x)
break
return agent_data_dir
def upgrade_agent(opts):
publickey = None
secretkey = None
identity = opts.vip_identity
if not identity:
raise ValueError("Missing required VIP IDENTITY option")
identity_to_uuid = opts.aip.get_agent_identity_to_uuid_mapping()
agent_uuid = identity_to_uuid.get(identity, None)
backup_agent_file = "/tmp/{}.tar.gz".format(agent_uuid)
if agent_uuid:
agent_data_dir = find_agent_data_dir(opts, agent_uuid)
if agent_data_dir:
backup_agent_data(backup_agent_file, agent_data_dir)
keystore = opts.aip.get_agent_keystore(agent_uuid)
publickey = keystore.public
secretkey = keystore.secret
_stdout.write('Removing previous version of agent "{}"\n'
.format(identity))
opts.connection.call('remove_agent', agent_uuid, remove_auth=False)
else:
_stdout.write(('Could not find agent with VIP IDENTITY "{}". '
'Installing as new agent\n').format(identity))
if secretkey is None or publickey is None:
publickey = None
secretkey = None
def restore_agents_data(agent_uuid):
# if we are upgrading transfer the old data on.
if os.path.exists(backup_agent_file):
new_agent_data_dir = find_agent_data_dir(opts, agent_uuid)
restore_agent_data_from_tgz(backup_agent_file, new_agent_data_dir)
os.remove(backup_agent_file)
install_agent(opts, publickey=publickey, secretkey=secretkey,
callback=restore_agents_data)
def install_agent(opts, publickey=None, secretkey=None, callback=None):
aip = opts.aip
filename = opts.wheel
tag = opts.tag
vip_identity = opts.vip_identity
if opts.vip_address.startswith('ipc://'):
_log.info("Installing wheel locally without channel subsystem")
filename = config.expandall(filename)
agent_uuid = opts.connection.call('install_agent_local',
filename,
vip_identity=vip_identity,
publickey=publickey,
secretkey=secretkey)
if tag:
opts.connection.call('tag_agent', agent_uuid, tag)
else:
try:
_log.debug('Creating channel for sending the agent.')
channel_name = str(uuid.uuid4())
channel = opts.connection.server.vip.channel('control',
channel_name)
_log.debug('calling control install agent.')
agent_uuid = opts.connection.call_no_get('install_agent',
filename,
channel_name,
vip_identity=vip_identity,
publickey=publickey,
secretkey=secretkey)
_log.debug('Sending wheel to control')
sha512 = hashlib.sha512()
with open(filename, 'rb') as wheel_file_data:
while True:
# get a request
with gevent.Timeout(60):
request, file_offset, chunk_size = channel.recv_multipart()
if request == b'checksum':
channel.send(sha512.digest())
break
assert request == b'fetch'
# send a chunk of the file
file_offset = int(file_offset)
chunk_size = int(chunk_size)
wheel_file_data.seek(file_offset)
data = wheel_file_data.read(chunk_size)
sha512.update(data)
channel.send(data)
agent_uuid = agent_uuid.get(timeout=10)
except Exception as exc:
if opts.debug:
traceback.print_exc()
_stderr.write(
'{}: error: {}: {}\n'.format(opts.command, exc, filename))
return 10
else:
if tag:
opts.connection.call('tag_agent',
agent_uuid,
tag)
finally:
_log.debug('closing channel')
channel.close(linger=0)
del channel
name = opts.connection.call('agent_name', agent_uuid)
_stdout.write('Installed {} as {} {}\n'.format(filename, agent_uuid, name))
    # Need to use a callback here rather than a return value.  I am not 100%
    # sure why, but this is what allows our tests to pass.
if callback:
callback(agent_uuid)
def tag_agent(opts):
agents = filter_agent(_list_agents(opts.aip), opts.agent, opts)
if len(agents) != 1:
if agents:
msg = 'multiple agents selected'
else:
msg = 'agent not found'
_stderr.write(
'{}: error: {}: {}\n'.format(opts.command, msg, opts.agent))
return 10
agent, = agents
if opts.tag:
_stdout.write('Tagging {} {}\n'.format(agent.uuid, agent.name))
opts.aip.tag_agent(agent.uuid, opts.tag)
elif opts.remove:
if agent.tag is not None:
_stdout.write(
'Removing tag for {} {}\n'.format(agent.uuid, agent.name))
opts.aip.tag_agent(agent.uuid, None)
else:
if agent.tag is not None:
_stdout.writelines([agent.tag, '\n'])
def remove_agent(opts, remove_auth=True):
agents = _list_agents(opts.aip)
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
elif len(match) > 1 and not opts.force:
_stderr.write(
'{}: error: pattern returned multiple agents: {}\n'.format(
opts.command, pattern))
_stderr.write(
'Use -f or --force to force removal of multiple agents.\n')
return 10
for agent in match:
_stdout.write('Removing {} {}\n'.format(agent.uuid, agent.name))
opts.connection.call('remove_agent', agent.uuid,
remove_auth=remove_auth)
def _calc_min_uuid_length(agents):
n = 0
for agent1 in agents:
for agent2 in agents:
if agent1 is agent2:
continue
common_len = len(os.path.commonprefix([agent1.uuid, agent2.uuid]))
if common_len > n:
n = common_len
return n + 1
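# Annotation (added, not part of the original module): _calc_min_uuid_length()
# returns the shortest prefix length that still distinguishes every installed
# agent, so listings can truncate UUIDs safely.  For example, with UUIDs
# starting 'abc1...' and 'abd2...' the longest common prefix is 'ab'
# (length 2), so the function returns 3.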
def list_agents(opts):
def get_priority(agent):
return opts.aip.agent_priority(agent.uuid) or ''
_show_filtered_agents(opts, 'PRI', get_priority)
def list_peers(opts):
conn = opts.connection
peers = sorted(conn.call('peerlist'))
for peer in peers:
sys.stdout.write("{}\n".format(peer))
def status_agents(opts):
agents = {agent.uuid: agent for agent in _list_agents(opts.aip)}
status = {}
for uuid, name, stat in opts.connection.call('status_agents'):
try:
agent = agents[uuid]
except KeyError:
            agents[uuid] = agent = Agent(name, None, uuid, None)
status[uuid] = stat
agents = agents.values()
def get_status(agent):
try:
pid, stat = status[agent.uuid]
except KeyError:
pid = stat = None
if stat is not None:
return str(stat)
if pid:
return 'running [{}]'.format(pid)
return ''
def get_health(agent):
try:
return opts.connection.server.vip.rpc.call(agent.vip_identity, 'health.get_status_json').get(timeout=4)[
'status']
except VIPError:
return ''
_show_filtered_agents_status(opts, get_status, get_health, agents)
def agent_health(opts):
agents = {agent.uuid: agent for agent in _list_agents(opts.aip)}.values()
agents = get_filtered_agents(opts, agents)
if not agents:
_stderr.write('No installed Agents found\n')
return
agent = agents.pop()
try:
_stderr.write(json.dumps(
opts.connection.server.vip.rpc.call(agent.vip_identity, 'health.get_status_json').get(timeout=4),
indent=4) + '\n'
)
except VIPError:
print("Agent {} is not running on the Volttron platform.".format(agent.uuid))
def clear_status(opts):
opts.connection.call('clear_status', opts.clear_all)
def enable_agent(opts):
agents = _list_agents(opts.aip)
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
for agent in match:
_stdout.write('Enabling {} {} with priority {}\n'.format(
agent.uuid, agent.name, opts.priority))
opts.aip.prioritize_agent(agent.uuid, opts.priority)
def disable_agent(opts):
agents = _list_agents(opts.aip)
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
for agent in match:
priority = opts.aip.agent_priority(agent.uuid)
if priority is not None:
_stdout.write(
'Disabling {} {}\n'.format(agent.uuid, agent.name))
opts.aip.prioritize_agent(agent.uuid, None)
def start_agent(opts):
call = opts.connection.call
agents = _list_agents(opts.aip)
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
for agent in match:
pid, status = call('agent_status', agent.uuid)
if pid is None or status is not None:
_stdout.write(
'Starting {} {}\n'.format(agent.uuid, agent.name))
call('start_agent', agent.uuid)
def stop_agent(opts):
call = opts.connection.call
agents = _list_agents(opts.aip)
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
for agent in match:
pid, status = call('agent_status', agent.uuid)
if pid and status is None:
_stdout.write(
'Stopping {} {}\n'.format(agent.uuid, agent.name))
call('stop_agent', agent.uuid)
def restart_agent(opts):
stop_agent(opts)
start_agent(opts)
def run_agent(opts):
call = opts.connection.call
for directory in opts.directory:
call('run_agent', directory)
def shutdown_agents(opts):
if 'rmq' == utils.get_messagebus():
if not check_rabbit_status():
rmq_cfg = RMQConfig()
wait_period = rmq_cfg.reconnect_delay() if rmq_cfg.reconnect_delay() < 60 else 60
_stderr.write(
'RabbitMQ server is not running.\n'
'Waiting for {} seconds for possible reconnection and to perform normal shutdown\n'.format(wait_period))
gevent.sleep(wait_period)
if not check_rabbit_status():
_stderr.write(
'RabbitMQ server is still not running.\nShutting down the platform forcefully\n')
opts.aip.brute_force_platform_shutdown()
return
opts.connection.call('shutdown')
_log.debug("Calling stop_platform")
if opts.platform:
opts.connection.notify('stop_platform')
def create_cgroups(opts):
try:
cgroups.setup(user=opts.user, group=opts.group)
except ValueError as exc:
_stderr.write('{}: error: {}\n'.format(opts.command, exc))
return os.EX_NOUSER
def _send_agent(connection, peer, path):
wheel = open(path, 'rb')
channel = connection.vip.channel(peer)
def send():
try:
# Wait for peer to open compliment channel
channel.recv()
while True:
data = wheel.read(8192)
channel.send(data)
if not data:
break
# Wait for peer to signal all data received
channel.recv()
finally:
wheel.close()
channel.close(linger=0)
result = connection.vip.rpc.call(
peer, 'install_agent', os.path.basename(path), channel.name)
task = gevent.spawn(send)
result.rawlink(lambda glt: task.kill(block=False))
return result
def send_agent(opts):
connection = opts.connection
for wheel in opts.wheel:
uuid = _send_agent(connection.server, connection.peer, wheel).get()
connection.call('start_agent', uuid)
_stdout.write('Agent {} started as {}\n'.format(wheel, uuid))
def gen_keypair(opts):
keypair = KeyStore.generate_keypair_dict()
_stdout.write('{}\n'.format(json.dumps(keypair, indent=2)))
def add_server_key(opts):
store = KnownHostsStore()
store.add(opts.host, opts.serverkey)
_stdout.write('server key written to {}\n'.format(store.filename))
def list_known_hosts(opts):
store = KnownHostsStore()
entries = store.load()
if entries:
_print_two_columns(entries, 'HOST', 'CURVE KEY')
else:
_stdout.write('No entries in {}\n'.format(store.filename))
def remove_known_host(opts):
store = KnownHostsStore()
store.remove(opts.host)
_stdout.write('host "{}" removed from {}\n'.format(opts.host,
store.filename))
def do_stats(opts):
call = opts.connection.call
if opts.op == 'status':
_stdout.write(
'%sabled\n' % ('en' if call('stats.enabled') else 'dis'))
elif opts.op in ['dump', 'pprint']:
stats = call('stats.get')
if opts.op == 'pprint':
import pprint
pprint.pprint(stats, _stdout)
else:
_stdout.writelines([str(stats), '\n'])
else:
call('stats.' + opts.op)
_stdout.write(
'%sabled\n' % ('en' if call('stats.enabled') else 'dis'))
def show_serverkey(opts):
"""
write serverkey to standard out.
return 0 if success, 1 if false
"""
q = Query(opts.connection.server.core)
pk = q.query('serverkey').get(timeout=2)
del q
if pk is not None:
_stdout.write('%s\n' % pk)
return 0
return 1
def _get_auth_file(volttron_home):
path = os.path.join(volttron_home, 'auth.json')
return AuthFile(path)
def _print_two_columns(dict_, key_name, value_name):
padding = 2
key_lengths = [len(key) for key in dict_] + [len(key_name)]
max_key_len = max(key_lengths) + padding
_stdout.write('{}{}{}\n'.format(key_name,
' ' * (max_key_len - len(key_name)), value_name))
_stdout.write('{}{}{}\n'.format('-' * len(key_name),
' ' * (max_key_len - len(key_name)),
'-' * len(value_name)))
for key in sorted(dict_):
value = dict_[key]
if isinstance(value, list):
value = sorted(value)
_stdout.write('{}{}{}\n'.format(key,
' ' * (max_key_len - len(key)), value))
def list_auth(opts, indices=None):
auth_file = _get_auth_file(opts.volttron_home)
entries = auth_file.read_allow_entries()
print_out = []
if entries:
for index, entry in enumerate(entries):
if indices is None or index in indices:
_stdout.write('\nINDEX: {}\n'.format(index))
_stdout.write(
'{}\n'.format(json.dumps(vars(entry), indent=2)))
else:
_stdout.write('No entries in {}\n'.format(auth_file.auth_file))
def _ask_for_auth_fields(domain=None, address=None, user_id=None,
capabilities=None, roles=None, groups=None,
mechanism='CURVE', credentials=None, comments=None,
enabled=True, **kwargs):
class Asker(object):
def __init__(self):
self._fields = collections.OrderedDict()
def add(self, name, default=None, note=None, callback=lambda x: x,
validate=lambda x, y: (True, '')):
self._fields[name] = {'note': note, 'default': default,
'callback': callback, 'validate': validate}
def ask(self):
for name in self._fields:
note = self._fields[name]['note']
default = self._fields[name]['default']
callback = self._fields[name]['callback']
validate = self._fields[name]['validate']
if isinstance(default, list):
default_str = '{}'.format(','.join(default))
elif default is None:
default_str = ''
else:
default_str = default
note = '({}) '.format(note) if note else ''
question = '{} {}[{}]: '.format(name, note, default_str)
valid = False
while not valid:
response = raw_input(question).strip()
if response == '':
response = default
if response == 'clear':
if _ask_yes_no('Do you want to clear this field?'):
response = None
valid, msg = validate(response, self._fields)
if not valid:
_stderr.write('{}\n'.format(msg))
self._fields[name]['response'] = callback(response)
return {k: self._fields[k]['response'] for k in self._fields}
def to_true_or_false(response):
if isinstance(response, basestring):
return {'true': True, 'false': False}[response.lower()]
return response
def is_true_or_false(x, fields):
if x is not None:
if isinstance(x, bool) or x.lower() in ['true', 'false']:
return True, None
return False, 'Please enter True or False'
def valid_creds(creds, fields):
try:
mechanism = fields['mechanism']['response']
AuthEntry.valid_credentials(creds, mechanism=mechanism)
except AuthException as e:
return False, e.message
return True, None
def valid_mech(mech, fields):
try:
AuthEntry.valid_mechanism(mech)
except AuthException as e:
return False, e.message
return True, None
asker = Asker()
asker.add('domain', domain)
asker.add('address', address)
asker.add('user_id', user_id)
asker.add('capabilities', capabilities,
'delimit multiple entries with comma', _comma_split)
asker.add('roles', roles, 'delimit multiple entries with comma',
_comma_split)
asker.add('groups', groups, 'delimit multiple entries with comma',
_comma_split)
asker.add('mechanism', mechanism, validate=valid_mech)
asker.add('credentials', credentials, validate=valid_creds)
asker.add('comments', comments)
asker.add('enabled', enabled, callback=to_true_or_false,
validate=is_true_or_false)
return asker.ask()
def _comma_split(line):
if not isinstance(line, basestring):
return line
line = line.strip()
if not line:
return []
return [word.strip() for word in line.split(',')]
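# Annotation (added, not part of the original module): _comma_split()
# normalizes the comma separated values accepted for groups, roles and
# capabilities, e.g.
#
#     _comma_split('reader, writer')  ->  ['reader', 'writer']
#     _comma_split('   ')             ->  []
#
# Non-string values (None or an existing list) pass through unchanged.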
def add_auth(opts):
"""Add authorization entry.
If all options are None, then use interactive 'wizard.'
"""
fields = {
"domain": opts.domain,
"address": opts.address,
"mechanism": opts.mechanism,
"credentials": opts.credentials,
"user_id": opts.user_id,
"groups": _comma_split(opts.groups),
"roles": _comma_split(opts.roles),
"capabilities": _comma_split(opts.capabilities),
"comments": opts.comments,
}
if any(fields.values()):
# Remove unspecified options so the default parameters are used
fields = {k: v for k, v in fields.items() if v}
fields['enabled'] = not opts.disabled
entry = AuthEntry(**fields)
else:
# No options were specified, use interactive wizard
responses = _ask_for_auth_fields()
entry = AuthEntry(**responses)
if opts.add_known_host:
if entry.address is None:
raise ValueError('host (--address) is required when '
'--add-known-host is specified')
if entry.credentials is None:
raise ValueError('serverkey (--credentials) is required when '
'--add-known-host is specified')
opts.host = entry.address
opts.serverkey = entry.credentials
add_server_key(opts)
auth_file = _get_auth_file(opts.volttron_home)
try:
auth_file.add(entry, overwrite=False)
_stdout.write('added entry {}\n'.format(entry))
except AuthException as err:
_stderr.write('ERROR: %s\n' % err.message)
def _ask_yes_no(question, default='yes'):
yes = set(['yes', 'ye', 'y'])
no = set(['no', 'n'])
y = 'y'
n = 'n'
if default in yes:
y = 'Y'
elif default in no:
n = 'N'
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
choice = raw_input('{} [{}/{}] '.format(question, y, n)).lower()
if choice == '':
choice = default
if choice in yes:
return True
if choice in no:
return False
_stderr.write("Please respond with 'yes' or 'no'\n")
def remove_auth(opts):
auth_file = _get_auth_file(opts.volttron_home)
entry_count = len(auth_file.read_allow_entries())
for i in opts.indices:
if i < 0 or i >= entry_count:
_stderr.write('ERROR: invalid index {}\n'.format(i))
return
_stdout.write('This action will delete the following:\n')
list_auth(opts, opts.indices)
if not _ask_yes_no('Do you wish to delete?'):
return
try:
auth_file.remove_by_indices(opts.indices)
if len(opts.indices) > 1:
msg = 'removed entries at indices {}'.format(opts.indices)
else:
            msg = 'removed entry at index {}'.format(opts.indices)
_stdout.write(msg + '\n')
except AuthException as err:
_stderr.write('ERROR: %s\n' % err.message)
def update_auth(opts):
auth_file = _get_auth_file(opts.volttron_home)
entries = auth_file.read_allow_entries()
try:
if opts.index < 0:
raise IndexError
entry = entries[opts.index]
_stdout.write('(For any field type "clear" to clear the value.)\n')
response = _ask_for_auth_fields(**entry.__dict__)
updated_entry = AuthEntry(**response)
auth_file.update_by_index(updated_entry, opts.index)
_stdout.write('updated entry at index {}\n'.format(opts.index))
except IndexError:
_stderr.write('ERROR: invalid index %s\n' % opts.index)
except AuthException as err:
_stderr.write('ERROR: %s\n' % err.message)
def add_role(opts):
auth_file = _get_auth_file(opts.volttron_home)
roles = auth_file.read()[2]
if opts.role in roles:
_stderr.write('role "{}" already exists\n'.format(opts.role))
return
roles[opts.role] = list(set(opts.capabilities))
auth_file.set_roles(roles)
_stdout.write('added role "{}"\n'.format(opts.role))
def list_roles(opts):
auth_file = _get_auth_file(opts.volttron_home)
roles = auth_file.read()[2]
_print_two_columns(roles, 'ROLE', 'CAPABILITIES')
def update_role(opts):
auth_file = _get_auth_file(opts.volttron_home)
roles = auth_file.read()[2]
if opts.role not in roles:
_stderr.write('role "{}" does not exist\n'.format(opts.role))
return
caps = roles[opts.role]
if opts.remove:
roles[opts.role] = list(set(caps) - set(opts.capabilities))
else:
roles[opts.role] = list(set(caps) | set(opts.capabilities))
auth_file.set_roles(roles)
_stdout.write('updated role "{}"\n'.format(opts.role))
def remove_role(opts):
auth_file = _get_auth_file(opts.volttron_home)
roles = auth_file.read()[2]
if opts.role not in roles:
_stderr.write('role "{}" does not exist\n'.format(opts.role))
return
del roles[opts.role]
auth_file.set_roles(roles)
_stdout.write('removed role "{}"\n'.format(opts.role))
def add_group(opts):
auth_file = _get_auth_file(opts.volttron_home)
groups = auth_file.read()[1]
if opts.group in groups:
_stderr.write('group "{}" already exists\n'.format(opts.group))
return
groups[opts.group] = list(set(opts.roles))
auth_file.set_groups(groups)
_stdout.write('added group "{}"\n'.format(opts.group))
def list_groups(opts):
auth_file = _get_auth_file(opts.volttron_home)
groups = auth_file.read()[1]
_print_two_columns(groups, 'GROUPS', 'ROLES')
def update_group(opts):
auth_file = _get_auth_file(opts.volttron_home)
groups = auth_file.read()[1]
if opts.group not in groups:
_stderr.write('group "{}" does not exist\n'.format(opts.group))
return
roles = groups[opts.group]
if opts.remove:
groups[opts.group] = list(set(roles) - set(opts.roles))
else:
groups[opts.group] = list(set(roles) | set(opts.roles))
auth_file.set_groups(groups)
_stdout.write('updated group "{}"\n'.format(opts.group))
def remove_group(opts):
auth_file = _get_auth_file(opts.volttron_home)
groups = auth_file.read()[1]
if opts.group not in groups:
_stderr.write('group "{}" does not exist\n'.format(opts.group))
return
del groups[opts.group]
auth_file.set_groups(groups)
_stdout.write('removed group "{}"\n'.format(opts.group))
def get_filtered_agents(opts, agents=None):
if opts.pattern:
filtered = set()
for pattern, match in filter_agents(agents, opts.pattern, opts):
if not match:
_stderr.write(
'{}: error: agent not found: {}\n'.format(opts.command,
pattern))
filtered |= match
agents = list(filtered)
return agents
def _show_filtered_agents(opts, field_name, field_callback, agents=None):
"""Provides generic way to filter and display agent information.
The agents will be filtered by the provided opts.pattern and the
following fields will be displayed:
* UUID (or part of the UUID)
* agent name
    * VIP identity
* tag
* field_name
@param:Namespace:opts:
Options from argparse
@param:string:field_name:
Name of field to display about agents
@param:function:field_callback:
Function that takes an Agent as an argument and returns data
to display
@param:list:agents:
List of agents to filter and display
"""
if not agents:
agents = _list_agents(opts.aip)
agents = get_filtered_agents(opts, agents)
if not agents:
_stderr.write('No installed Agents found\n')
return
agents.sort()
if not opts.min_uuid_len:
n = 36
else:
n = max(_calc_min_uuid_length(agents), opts.min_uuid_len)
name_width = max(5, max(len(agent.name) for agent in agents))
tag_width = max(3, max(len(agent.tag or '') for agent in agents))
identity_width = max(3, max(len(agent.vip_identity or '') for agent in agents))
fmt = '{} {:{}} {:{}} {:{}} {:>6}\n'
_stderr.write(
fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width,
'TAG', tag_width, field_name))
for agent in agents:
_stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width,
agent.vip_identity, identity_width,
agent.tag or '', tag_width,
field_callback(agent)))
def _show_filtered_agents_status(opts, status_callback, health_callback, agents=None):
"""Provides generic way to filter and display agent information.
The agents will be filtered by the provided opts.pattern and the
following fields will be displayed:
* UUID (or part of the UUID)
* agent name
    * VIP identity
* tag
* field_name
@param:Namespace:opts:
Options from argparse
    @param:function:status_callback:
        Function that takes an Agent as an argument and returns its
        process status to display
    @param:function:health_callback:
        Function that takes an Agent as an argument and returns its
        health status to display
@param:list:agents:
List of agents to filter and display
"""
if not agents:
agents = _list_agents(opts.aip)
agents = get_filtered_agents(opts, agents)
if not agents:
_stderr.write('No installed Agents found\n')
return
agents.sort()
if not opts.min_uuid_len:
n = 36
else:
n = max(_calc_min_uuid_length(agents), opts.min_uuid_len)
name_width = max(5, max(len(agent.name) for agent in agents))
tag_width = max(3, max(len(agent.tag or '') for agent in agents))
identity_width = max(3, max(len(agent.vip_identity or '') for agent in agents))
fmt = '{} {:{}} {:{}} {:{}} {:>6} {:>15}\n'
_stderr.write(
fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width,
'TAG', tag_width, 'STATUS', 'HEALTH'))
fmt = '{} {:{}} {:{}} {:{}} {:<15} {:<}\n'
for agent in agents:
_stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width,
agent.vip_identity, identity_width,
agent.tag or '', tag_width,
status_callback(agent), health_callback(agent)))
def get_agent_publickey(opts):
def get_key(agent):
return opts.aip.get_agent_keystore(agent.uuid).public
_show_filtered_agents(opts, 'PUBLICKEY', get_key)
# XXX: reimplement over VIP
# def send_agent(opts):
# _log.debug("send_agent: "+ str(opts))
# ssh_dir = os.path.join(opts.volttron_home, 'ssh')
# _log.debug('ssh_dir: ' + ssh_dir)
# try:
# host_key, client = comms.client(ssh_dir, opts.host, opts.port)
# except (OSError, IOError, PasswordRequiredException, SSHException) as exc:
# if opts.debug:
# traceback.print_exc()
# _stderr.write('{}: error: {}\n'.format(opts.command, exc))
# if isinstance(exc, OSError):
# return os.EX_OSERR
# if isinstance(exc, IOError):
# return os.EX_IOERR
# return os.EX_SOFTWARE
# if host_key is None:
# _stderr.write('warning: no public key found for remote host\n')
# with client:
# for wheel in opts.wheel:
# with open(wheel) as file:
# client.send_and_start_agent(file)
def add_config_to_store(opts):
opts.connection.peer = CONFIGURATION_STORE
call = opts.connection.call
file_contents = opts.infile.read()
call("manage_store", opts.identity, opts.name, file_contents, config_type=opts.config_type)
def delete_config_from_store(opts):
opts.connection.peer = CONFIGURATION_STORE
call = opts.connection.call
if opts.delete_store:
call("manage_delete_store", opts.identity)
return
if opts.name is None:
_stderr.write('ERROR: must specify a configuration when not deleting entire store\n')
return
call("manage_delete_config", opts.identity, opts.name)
def list_store(opts):
opts.connection.peer = CONFIGURATION_STORE
call = opts.connection.call
results = []
if opts.identity is None:
results = call("manage_list_stores")
else:
results = call("manage_list_configs", opts.identity)
for item in results:
_stdout.write(item + "\n")
def get_config(opts):
opts.connection.peer = CONFIGURATION_STORE
call = opts.connection.call
results = call("manage_get", opts.identity, opts.name, raw=opts.raw)
if opts.raw:
_stdout.write(results)
else:
if isinstance(results, str):
_stdout.write(results)
else:
_stdout.write(json.dumps(results, indent=2))
_stdout.write("\n")
def edit_config(opts):
opts.connection.peer = CONFIGURATION_STORE
call = opts.connection.call
if opts.new_config:
config_type = opts.config_type
raw_data = ''
else:
try:
results = call("manage_get_metadata", opts.identity, opts.name)
config_type = results["type"]
raw_data = results["data"]
except RemoteError as e:
if "No configuration file" not in e.message:
raise
config_type = opts.config_type
raw_data = ''
# Write raw data to temp file
# This will not work on Windows, FYI
with tempfile.NamedTemporaryFile(suffix=".txt") as f:
f.write(raw_data)
f.flush()
success = True
try:
# do not use utils.execute_command as we don't want set stdout to
# subprocess.PIPE
subprocess.check_call([opts.editor, f.name])
except subprocess.CalledProcessError as e:
_stderr.write("Editor returned with code {}. Changes not committed.\n".format(e.returncode))
success = False
if not success:
return
f.seek(0)
new_raw_data = f.read()
if new_raw_data == raw_data:
_stderr.write("No changes detected.\n")
return
call("manage_store", opts.identity, opts.name, new_raw_data, config_type=config_type)
class ControlConnection(object):
def __init__(self, address, peer='control',
publickey=None, secretkey=None, serverkey=None):
self.address = address
self.peer = peer
message_bus = utils.get_messagebus()
self._server = BaseAgent(address=self.address, publickey=publickey,
secretkey=secretkey, serverkey=serverkey,
enable_store=False,
identity=CONTROL_CONNECTION,
message_bus=message_bus,
enable_channel=True)
self._greenlet = None
@property
def server(self):
if self._greenlet is None:
event = gevent.event.Event()
self._greenlet = gevent.spawn(self._server.core.run, event)
event.wait()
return self._server
def call(self, method, *args, **kwargs):
return self.server.vip.rpc.call(
self.peer, method, *args, **kwargs).get()
def call_no_get(self, method, *args, **kwargs):
return self.server.vip.rpc.call(
self.peer, method, *args, **kwargs)
def notify(self, method, *args, **kwargs):
return self.server.vip.rpc.notify(
self.peer, method, *args, **kwargs)
def kill(self, *args, **kwargs):
if self._greenlet is not None:
self._greenlet.kill(*args, **kwargs)
def priority(value):
n = int(value)
if not 0 <= n < 100:
raise ValueError('invalid priority (0 <= n < 100): {}'.format(n))
return '{:02}'.format(n)
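# Annotation (added, not part of the original module): priority() normalizes
# an agent start-up priority to a zero padded two character string so that
# priorities sort lexically, e.g. priority('5') -> '05' and
# priority('30') -> '30'; values outside 0..99 raise ValueError.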
def get_keys(opts):
'''Gets keys from keystore and known-hosts store'''
hosts = KnownHostsStore()
serverkey = hosts.serverkey(opts.vip_address)
key_store = KeyStore()
publickey = key_store.public
secretkey = key_store.secret
return {'publickey': publickey, 'secretkey': secretkey,
'serverkey': serverkey}
# RabbitMQ management methods
def add_vhost(opts):
try:
rmq_mgmt.create_vhost(opts.vhost)
except requests.exceptions.HTTPError as e:
_stdout.write("Error adding a Virtual Host: {} \n".format(opts.vhost))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def add_user(opts):
rmq_mgmt.create_user(opts.user, opts.pwd)
permissions = dict(configure="", read="", write="")
read = _ask_yes_no("Do you want to set READ permission ")
write = _ask_yes_no("Do you want to set WRITE permission ")
configure = _ask_yes_no("Do you want to set CONFIGURE permission ")
if read:
permissions['read'] = ".*"
if write:
permissions['write'] = ".*"
if configure:
permissions['configure'] = ".*"
try:
rmq_mgmt.set_user_permissions(permissions, opts.user)
except requests.exceptions.HTTPError as e:
_stdout.write("Error Setting User permissions : {} \n".format(opts.user))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def add_exchange(opts):
if opts.type not in ['topic', 'fanout', 'direct']:
print("Unknown exchange type. Valid exchange types are topic or fanout or direct")
return
durable = _ask_yes_no("Do you want exchange to be durable ")
auto_delete = _ask_yes_no("Do you want exchange to be auto deleted ")
alternate = _ask_yes_no("Do you want alternate exchange ")
properties = dict(durable=durable, type=opts.type, auto_delete=auto_delete)
try:
if alternate:
alternate_exch = opts.name + 'alternate'
properties['alternate-exchange'] = alternate_exch
# create alternate exchange
new_props = dict(durable=durable, type='fanout', auto_delete=auto_delete)
rmq_mgmt.create_exchange(alternate_exch, new_props)
rmq_mgmt.create_exchange(opts.name, properties)
except requests.exceptions.HTTPError as e:
_stdout.write("Error Adding Exchange : {} \n".format(opts.name))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def add_queue(opts):
durable = _ask_yes_no("Do you want queue to be durable ")
auto_delete = _ask_yes_no("Do you want queue to be auto deleted ")
properties = dict(durable=durable, auto_delete=auto_delete)
try:
rmq_mgmt.create_queue(opts.name, properties)
except requests.exceptions.HTTPError as e:
_stdout.write("Error Adding Queue : {} \n".format(opts.name))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def list_vhosts(opts):
try:
vhosts = rmq_mgmt.get_virtualhosts()
for item in vhosts:
_stdout.write(item + "\n")
except requests.exceptions.HTTPError as e:
_stdout.write("No Virtual Hosts Found: {} \n")
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def list_users(opts):
try:
users = rmq_mgmt.get_users()
for item in users:
_stdout.write(item + "\n")
except requests.exceptions.HTTPError as e:
_stdout.write("No Users Found: {} \n")
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def list_user_properties(opts):
try:
props = rmq_mgmt.get_user_props(opts.user)
for key, value in props.iteritems():
_stdout.write("{0}: {1} \n".format(key, value))
except requests.exceptions.HTTPError as e:
_stdout.write("No User Found: {} \n".format(opts.user))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def list_exchanges(opts):
try:
exchanges = rmq_mgmt.get_exchanges()
for exch in exchanges:
_stdout.write(exch + "\n")
except requests.exceptions.HTTPError as e:
_stdout.write("No exchanges found \n")
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def list_exchanges_with_properties(opts):
exchanges = None
try:
exchanges = rmq_mgmt.get_exchanges_with_props()
except requests.exceptions.HTTPError as e:
_stdout.write("No exchanges found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
name_width = max(8, max(len(e['name']) for e in exchanges))
dur_width = len('DURABLE')
auto_width = len('AUTO-DELETE')
type_width = max(6, max(len(e['type']) for e in exchanges))
# args_width = max(6, max(len(e['type']) for e in exchanges))
fmt = '{:{}} {:{}} {:{}} {:{}}\n'
_stderr.write(
fmt.format('EXCHANGE', name_width, 'TYPE', type_width, 'DURABLE', dur_width,
'AUTO-DELETE', auto_width))
for exch in exchanges:
_stdout.write(fmt.format(exch['name'], name_width,
exch['type'], type_width,
str(exch['durable']), dur_width,
str(exch['auto_delete']), auto_width))
# exch['messages'], args_width))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in getting queue properties")
def list_queues(opts):
queues = None
try:
queues = rmq_mgmt.get_queues()
except requests.exceptions.HTTPError as e:
_stdout.write("No queues found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
if queues:
for q in queues:
_stdout.write(q + "\n")
def list_queues_with_properties(opts):
queues = None
try:
queues = rmq_mgmt.get_queues_with_props()
except requests.exceptions.HTTPError as e:
_stdout.write("No queues found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
name_width = max(5, max(len(q['name']) for q in queues))
dur_width = len('DURABLE')
excl_width = len('EXCLUSIVE')
auto_width = len('auto-delete')
state_width = len('running')
unack_width = len('MESSAGES')
fmt = '{:{}} {:{}} {:{}} {:{}} {:{}} {:{}}\n'
_stderr.write(
fmt.format('QUEUE', name_width, 'STATE', state_width, 'DURABLE', dur_width,
'EXCLUSIVE', excl_width, 'AUTO-DELETE', auto_width,
'MESSAGES', unack_width))
for q in queues:
_stdout.write(fmt.format(q['name'], name_width,
str(q['state']), state_width,
str(q['durable']), dur_width,
str(q['exclusive']), excl_width,
str(q['auto_delete']), auto_width,
q['messages'], unack_width))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in getting queue properties")
def list_connections(opts):
try:
conn = rmq_mgmt.get_connection()
except requests.exceptions.HTTPError as e:
_stdout.write("No connections found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
def list_fed_parameters(opts):
parameters = None
try:
parameters = rmq_mgmt.get_parameter('federation-upstream')
except requests.exceptions.HTTPError as e:
_stdout.write("No Federation Parameters Found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
if parameters:
name_width = max(5, max(len(p['name']) for p in parameters))
uri_width = max(3, max(len(p['value']['uri']) for p in parameters))
fmt = '{:{}} {:{}}\n'
_stderr.write(
fmt.format('NAME', name_width, 'URI', uri_width))
for param in parameters:
_stdout.write(fmt.format(param['name'], name_width,
param['value']['uri'], uri_width))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in federation parameters")
def list_shovel_parameters(opts):
parameters = None
try:
parameters = rmq_mgmt.get_parameter('shovel')
except requests.exceptions.HTTPError as e:
_stdout.write("No Shovel Parameters Found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
if parameters:
name_width = max(5, max(len(p['name']) for p in parameters))
src_uri_width = max(len('SOURCE ADDRESS'),
max(len(p['value']['src-uri']) for p in parameters))
dest_uri_width = max(len('DESTINATION ADDRESS'),
max(len(p['value']['dest-uri']) for p in parameters))
binding_key = max(len('BINDING KEY'),
max(len(p['value']['src-exchange-key']) for p in parameters))
fmt = '{:{}} {:{}} {:{}} {:{}}\n'
_stderr.write(
fmt.format('NAME', name_width,
'SOURCE ADDRESS', src_uri_width,
'DESTINATION ADDRESS', dest_uri_width,
'BINDING KEY', binding_key))
for param in parameters:
_stdout.write(fmt.format(param['name'], name_width,
param['value']['src-uri'], src_uri_width,
param['value']['dest-uri'], dest_uri_width,
param['value']['src-exchange-key'], binding_key))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in getting shovel parameters")
def list_bindings(opts):
bindings = None
try:
bindings = rmq_mgmt.get_bindings(opts.exchange)
except requests.exceptions.HTTPError as e:
_stdout.write("No Bindings Found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
if bindings:
src_width = max(5, max(len(b['source']) for b in bindings))
exch_width = len('EXCHANGE')
dest_width = max(len('QUEUE'), max(len(b['destination']) for b in bindings))
bindkey = len('BINDING KEY')
rkey = max(10, max(len(b['routing_key']) for b in bindings))
fmt = '{:{}} {:{}} {:{}}\n'
_stderr.write(
fmt.format('EXCHANGE', exch_width, 'QUEUE', dest_width, 'BINDING KEY', bindkey))
for b in bindings:
_stdout.write(fmt.format(b['source'], src_width,
b['destination'], dest_width,
b['routing_key'], rkey))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in getting bindings")
def list_policies(opts):
policies = None
try:
policies = rmq_mgmt.get_policies()
except requests.exceptions.HTTPError as e:
_stdout.write("No Policies Found \n")
return
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
return
try:
if policies:
name_width = max(5, max(len(p['name']) for p in policies))
apply_width = max(8, max(len(p['apply-to']) for p in policies))
fmt = '{:{}} {:{}}\n'
_stderr.write(
fmt.format('NAME', name_width, 'APPLY-TO', apply_width))
for policy in policies:
_stdout.write(fmt.format(policy['name'], name_width,
policy['apply-to'], apply_width))
except (AttributeError, KeyError) as ex:
_stdout.write("Error in getting policies")
def remove_vhosts(opts):
try:
for vhost in opts.vhost:
rmq_mgmt.delete_vhost(vhost)
except requests.exceptions.HTTPError as e:
_stdout.write("No Vhost Found {} \n".format(opts.vhost))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_users(opts):
try:
for user in opts.user:
rmq_mgmt.delete_user(user)
except requests.exceptions.HTTPError as e:
_stdout.write("No User Found {} \n".format(opts.user))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_exchanges(opts):
try:
for e in opts.exchanges:
rmq_mgmt.delete_exchange(e)
except requests.exceptions.HTTPError as e:
_stdout.write("No Exchange Found {} \n".format(opts.exchanges))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_queues(opts):
try:
for q in opts.queues:
rmq_mgmt.delete_queue(q)
except requests.exceptions.HTTPError as e:
_stdout.write("No Queues Found {} \n".format(opts.queues))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_fed_parameters(opts):
try:
for param in opts.parameters:
rmq_mgmt.delete_multiplatform_parameter('federation-upstream', param)
except requests.exceptions.HTTPError as e:
_stdout.write("No Federation Parameters Found {} \n".format(opts.parameters))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_shovel_parameters(opts):
try:
for param in opts.parameters:
rmq_mgmt.delete_multiplatform_parameter('shovel', param)
except requests.exceptions.HTTPError as e:
_stdout.write("No Shovel Parameters Found {} \n".format(opts.parameters))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def remove_policies(opts):
try:
for policy in opts.policies:
rmq_mgmt.delete_policy(policy)
except requests.exceptions.HTTPError as e:
_stdout.write("No Policies Found {} \n".format(opts.policies))
except (ConnectionError, NewConnectionError) as e:
_stdout.write("Error making request to RabbitMQ Management interface.\n"
"Check Connection Parameters: {} \n".format(e))
def main(argv=sys.argv):
# Refuse to run as root
if not getattr(os, 'getuid', lambda: -1)():
sys.stderr.write('%s: error: refusing to run as root to prevent '
'potential damage.\n' % os.path.basename(argv[0]))
sys.exit(77)
volttron_home = get_home()
os.environ['VOLTTRON_HOME'] = volttron_home
global_args = config.ArgumentParser(description='global options',
add_help=False)
global_args.add_argument('-c', '--config', metavar='FILE',
action='parse_config', ignore_unknown=True,
sections=[None, 'global', 'volttron-ctl'],
help='read configuration from FILE')
global_args.add_argument('--debug', action='store_true',
                             help='show tracebacks for errors rather than a brief message')
global_args.add_argument('-t', '--timeout', type=float, metavar='SECS',
help='timeout in seconds for remote calls (default: %(default)g)')
global_args.add_argument('--msgdebug',
help='route all messages to an agent while debugging')
global_args.add_argument(
'--vip-address', metavar='ZMQADDR',
help='ZeroMQ URL to bind for VIP connections')
global_args.set_defaults(
vip_address=get_address(),
timeout=60,
)
filterable = config.ArgumentParser(add_help=False)
filterable.add_argument('--name', dest='by_name', action='store_true',
help='filter/search by agent name')
filterable.add_argument('--tag', dest='by_tag', action='store_true',
help='filter/search by tag name')
filterable.add_argument('--uuid', dest='by_uuid', action='store_true',
help='filter/search by UUID (default)')
filterable.set_defaults(by_name=False, by_tag=False, by_uuid=False)
parser = config.ArgumentParser(
prog=os.path.basename(argv[0]), add_help=False,
description='Manage and control VOLTTRON agents.',
usage='%(prog)s command [OPTIONS] ...',
argument_default=argparse.SUPPRESS,
parents=[global_args]
)
parser.add_argument('-l', '--log', metavar='FILE', default=None,
help='send log output to FILE instead of stderr')
parser.add_argument('-L', '--log-config', metavar='FILE',
help='read logging configuration from FILE')
parser.add_argument('-q', '--quiet', action='add_const', const=10,
dest='verboseness',
help='decrease logger verboseness; may be used multiple times')
parser.add_argument('-v', '--verbose', action='add_const', const=-10,
dest='verboseness',
help='increase logger verboseness; may be used multiple times')
parser.add_argument('--verboseness', type=int, metavar='LEVEL',
default=logging.WARNING,
help='set logger verboseness')
parser.add_argument(
'--show-config', action='store_true',
help=argparse.SUPPRESS)
parser.add_help_argument()
parser.set_defaults(
log_config=None,
volttron_home=volttron_home,
)
top_level_subparsers = parser.add_subparsers(title='commands', metavar='',
dest='command')
def add_parser(*args, **kwargs):
parents = kwargs.get('parents', [])
parents.append(global_args)
kwargs['parents'] = parents
subparser = kwargs.pop("subparser", top_level_subparsers)
return subparser.add_parser(*args, **kwargs)
install = add_parser('install', help='install agent from wheel',
epilog='Optionally you may specify the --tag argument to tag the '
'agent during install without requiring a separate call to '
'the tag command. ')
install.add_argument('wheel', help='path to agent wheel')
install.add_argument('--tag', help='tag for the installed agent')
install.add_argument('--vip-identity', help='VIP IDENTITY for the installed agent. '
'Overrides any previously configured VIP IDENTITY.')
if HAVE_RESTRICTED:
install.add_argument('--verify', action='store_true',
dest='verify_agents',
help='verify agent integrity during install')
install.add_argument('--no-verify', action='store_false',
dest='verify_agents',
help=argparse.SUPPRESS)
install.set_defaults(func=install_agent, verify_agents=True)
tag = add_parser('tag', parents=[filterable],
help='set, show, or remove agent tag')
tag.add_argument('agent', help='UUID or name of agent')
group = tag.add_mutually_exclusive_group()
group.add_argument('tag', nargs='?', const=None, help='tag to give agent')
group.add_argument('-r', '--remove', action='store_true',
help='remove tag')
tag.set_defaults(func=tag_agent, tag=None, remove=False)
remove = add_parser('remove', parents=[filterable],
help='remove agent')
remove.add_argument('pattern', nargs='+', help='UUID or name of agent')
remove.add_argument('-f', '--force', action='store_true',
help='force removal of multiple agents')
remove.set_defaults(func=remove_agent, force=False)
peers = add_parser('peerlist', help='list the peers connected to the platform')
peers.set_defaults(func=list_peers)
list_ = add_parser('list', parents=[filterable],
                       help='list installed agents')
list_.add_argument('pattern', nargs='*',
help='UUID or name of agent')
list_.add_argument('-n', dest='min_uuid_len', type=int, metavar='N',
help='show at least N characters of UUID (0 to show all)')
list_.set_defaults(func=list_agents, min_uuid_len=1)
status = add_parser('status', parents=[filterable],
help='show status of agents')
status.add_argument('pattern', nargs='*',
help='UUID or name of agent')
status.add_argument('-n', dest='min_uuid_len', type=int, metavar='N',
help='show at least N characters of UUID (0 to show all)')
status.set_defaults(func=status_agents, min_uuid_len=1)
health = add_parser('health', parents=[filterable],
help='show agent health as JSON')
health.add_argument('pattern', nargs=1, help='UUID or name of agent')
health.set_defaults(func=agent_health, min_uuid_len=1)
clear = add_parser('clear', help='clear status of defunct agents')
clear.add_argument('-a', '--all', dest='clear_all', action='store_true',
help='clear the status of all agents')
clear.set_defaults(func=clear_status, clear_all=False)
enable = add_parser('enable', parents=[filterable],
help='enable agent to start automatically')
enable.add_argument('pattern', nargs='+', help='UUID or name of agent')
enable.add_argument('-p', '--priority', type=priority,
help='2-digit priority from 00 to 99')
enable.set_defaults(func=enable_agent, priority='50')
disable = add_parser('disable', parents=[filterable],
                         help='prevent agent from starting automatically')
disable.add_argument('pattern', nargs='+', help='UUID or name of agent')
disable.set_defaults(func=disable_agent)
start = add_parser('start', parents=[filterable],
help='start installed agent')
start.add_argument('pattern', nargs='+', help='UUID or name of agent')
if HAVE_RESTRICTED:
start.add_argument('--verify', action='store_true',
dest='verify_agents',
help='verify agent integrity during start')
start.add_argument('--no-verify', action='store_false',
dest='verify_agents',
help=argparse.SUPPRESS)
start.set_defaults(func=start_agent)
stop = add_parser('stop', parents=[filterable],
help='stop agent')
stop.add_argument('pattern', nargs='+', help='UUID or name of agent')
stop.set_defaults(func=stop_agent)
restart = add_parser('restart', parents=[filterable],
help='restart agent')
restart.add_argument('pattern', nargs='+', help='UUID or name of agent')
restart.set_defaults(func=restart_agent)
run = add_parser('run',
help='start any agent by path')
run.add_argument('directory', nargs='+', help='path to agent directory')
if HAVE_RESTRICTED:
run.add_argument('--verify', action='store_true',
dest='verify_agents',
help='verify agent integrity during run')
run.add_argument('--no-verify', action='store_false',
dest='verify_agents',
help=argparse.SUPPRESS)
run.set_defaults(func=run_agent)
upgrade = add_parser('upgrade', help='upgrade agent from wheel',
epilog='Optionally you may specify the --tag argument to tag the '
'agent during upgrade without requiring a separate call to '
'the tag command. ')
upgrade.add_argument('vip_identity', metavar='vip-identity',
help='VIP IDENTITY of agent to upgrade')
upgrade.add_argument('wheel', help='path to new agent wheel')
upgrade.add_argument('--tag', help='tag for the upgraded agent')
if HAVE_RESTRICTED:
upgrade.add_argument('--verify', action='store_true',
dest='verify_agents',
help='verify agent integrity during upgrade')
upgrade.add_argument('--no-verify', action='store_false',
dest='verify_agents',
help=argparse.SUPPRESS)
upgrade.set_defaults(func=upgrade_agent, verify_agents=True)
auth_cmds = add_parser("auth",
help="manage authorization entries and encryption keys")
auth_subparsers = auth_cmds.add_subparsers(title='subcommands',
metavar='', dest='store_commands')
auth_add = add_parser('add',
help='add new authentication record',
subparser=auth_subparsers)
auth_add.add_argument('--domain', default=None)
auth_add.add_argument('--address', default=None)
auth_add.add_argument('--mechanism', default=None)
auth_add.add_argument('--credentials', default=None)
auth_add.add_argument('--user_id', default=None)
auth_add.add_argument('--groups', default=None,
help='delimit multiple entries with comma')
auth_add.add_argument('--roles', default=None,
help='delimit multiple entries with comma')
auth_add.add_argument('--capabilities', default=None,
help='delimit multiple entries with comma')
auth_add.add_argument('--comments', default=None)
auth_add.add_argument('--disabled', action='store_true')
auth_add.add_argument('--add-known-host', action='store_true',
help='adds entry in known host')
auth_add.set_defaults(func=add_auth)
auth_add_group = add_parser('add-group',
subparser=auth_subparsers,
help='associate a group name with a set of roles')
auth_add_group.add_argument('group', metavar='GROUP', help='name of group')
auth_add_group.add_argument('roles', metavar='ROLE',
nargs='*', help='roles to associate with the group')
auth_add_group.set_defaults(func=add_group)
auth_add_known_host = add_parser('add-known-host',
subparser=auth_subparsers,
help='add server public key to known-hosts file')
auth_add_known_host.add_argument('--host', required=True,
help='hostname or IP address with optional port')
auth_add_known_host.add_argument('--serverkey', required=True)
auth_add_known_host.set_defaults(func=add_server_key)
auth_add_role = add_parser('add-role',
subparser=auth_subparsers,
help='associate a role name with a set of capabilities')
auth_add_role.add_argument('role', metavar='ROLE', help='name of role')
auth_add_role.add_argument('capabilities', metavar='CAPABILITY',
nargs='*', help='capabilities to associate with the role')
auth_add_role.set_defaults(func=add_role)
auth_keypair = add_parser('keypair', subparser=auth_subparsers,
help='generate CurveMQ keys for encrypting VIP connections')
auth_keypair.set_defaults(func=gen_keypair)
auth_list = add_parser('list', help='list authentication records',
subparser=auth_subparsers)
auth_list.set_defaults(func=list_auth)
auth_list_groups = add_parser('list-groups',
subparser=auth_subparsers,
help='show list of group names and their sets of roles')
auth_list_groups.set_defaults(func=list_groups)
auth_list_known_host = add_parser('list-known-hosts',
subparser=auth_subparsers,
help='list entries from known-hosts file')
auth_list_known_host.set_defaults(func=list_known_hosts)
auth_list_roles = add_parser('list-roles',
subparser=auth_subparsers,
help='show list of role names and their sets of capabilities')
auth_list_roles.set_defaults(func=list_roles)
auth_publickey = add_parser('publickey', parents=[filterable],
subparser=auth_subparsers, help='show public key for each agent')
auth_publickey.add_argument('pattern', nargs='*',
help='UUID or name of agent')
auth_publickey.add_argument('-n', dest='min_uuid_len', type=int, metavar='N',
help='show at least N characters of UUID (0 to show all)')
auth_publickey.set_defaults(func=get_agent_publickey, min_uuid_len=1)
auth_remove = add_parser('remove', subparser=auth_subparsers,
help='removes one or more authentication records by indices')
auth_remove.add_argument('indices', nargs='+', type=int,
help='index or indices of record(s) to remove')
auth_remove.set_defaults(func=remove_auth)
auth_remove_group = add_parser('remove-group',
subparser=auth_subparsers,
help='disassociate a group name from a set of roles')
auth_remove_group.add_argument('group', help='name of group')
auth_remove_group.set_defaults(func=remove_group)
auth_remove_known_host = add_parser('remove-known-host',
subparser=auth_subparsers,
help='remove entry from known-hosts file')
auth_remove_known_host.add_argument('host', metavar='HOST',
help='hostname or IP address with optional port')
auth_remove_known_host.set_defaults(func=remove_known_host)
auth_remove_role = add_parser('remove-role',
subparser=auth_subparsers,
help='disassociate a role name from a set of capabilities')
auth_remove_role.add_argument('role', help='name of role')
auth_remove_role.set_defaults(func=remove_role)
auth_serverkey = add_parser('serverkey', subparser=auth_subparsers,
help="show the serverkey for the instance")
auth_serverkey.set_defaults(func=show_serverkey)
auth_update = add_parser('update', subparser=auth_subparsers,
help='updates one authentication record by index')
auth_update.add_argument('index', type=int,
help='index of record to update')
auth_update.set_defaults(func=update_auth)
auth_update_group = add_parser('update-group',
subparser=auth_subparsers,
help='update group to include (or remove) given roles')
auth_update_group.add_argument('group', metavar='GROUP', help='name of group')
auth_update_group.add_argument('roles', nargs='*',
metavar='ROLE',
help='roles to append to (or remove from) the group')
auth_update_group.add_argument('--remove', action='store_true',
help='remove (rather than append) given roles')
auth_update_group.set_defaults(func=update_group)
auth_update_role = add_parser('update-role',
subparser=auth_subparsers,
help='update role to include (or remove) given capabilities')
auth_update_role.add_argument('role', metavar='ROLE', help='name of role')
auth_update_role.add_argument('capabilities', nargs='*',
metavar='CAPABILITY',
help='capabilities to append to (or remove from) the role')
auth_update_role.add_argument('--remove', action='store_true',
help='remove (rather than append) given capabilities')
auth_update_role.set_defaults(func=update_role)
config_store = add_parser("config",
help="manage the platform configuration store")
config_store_subparsers = config_store.add_subparsers(title='subcommands', metavar='',
dest='store_commands')
config_store_store = add_parser("store",
help="store a configuration",
subparser=config_store_subparsers)
config_store_store.add_argument('identity',
help='VIP IDENTITY of the store')
config_store_store.add_argument('name',
help='name used to reference the configuration by in the store')
config_store_store.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='file containing the contents of the configuration')
config_store_store.add_argument('--raw', const="raw", dest="config_type", action="store_const",
help='interpret the input file as raw data')
config_store_store.add_argument('--json', const="json", dest="config_type", action="store_const",
help='interpret the input file as json')
config_store_store.add_argument('--csv', const="csv", dest="config_type", action="store_const",
help='interpret the input file as csv')
config_store_store.set_defaults(func=add_config_to_store,
config_type="json")
config_store_edit = add_parser("edit",
help="edit a configuration. (nano by default, respects EDITOR env variable)",
subparser=config_store_subparsers)
config_store_edit.add_argument('identity',
help='VIP IDENTITY of the store')
config_store_edit.add_argument('name',
help='name used to reference the configuration by in the store')
config_store_edit.add_argument('--editor', dest="editor",
help='Set the editor to use to change the file. Defaults to nano if EDITOR is not set',
default=os.getenv("EDITOR", "nano"))
config_store_edit.add_argument('--raw', const="raw", dest="config_type", action="store_const",
help='Interpret the configuration as raw data. If the file already exists this is ignored.')
config_store_edit.add_argument('--json', const="json", dest="config_type", action="store_const",
help='Interpret the configuration as json. If the file already exists this is ignored.')
config_store_edit.add_argument('--csv', const="csv", dest="config_type", action="store_const",
help='Interpret the configuration as csv. If the file already exists this is ignored.')
config_store_edit.add_argument('--new', dest="new_config", action="store_true",
help='Ignore any existing configuration and creates new empty file.'
' Configuration is not written if left empty. Type defaults to JSON.')
config_store_edit.set_defaults(func=edit_config,
config_type="json")
config_store_delete = add_parser("delete",
help="delete a configuration",
subparser=config_store_subparsers)
config_store_delete.add_argument('identity',
help='VIP IDENTITY of the store')
config_store_delete.add_argument('name', nargs='?',
help='name used to reference the configuration by in the store')
config_store_delete.add_argument('--all', dest="delete_store", action="store_true",
help='delete all configurations in the store')
config_store_delete.set_defaults(func=delete_config_from_store)
config_store_list = add_parser("list",
help="list stores or configurations in a store",
subparser=config_store_subparsers)
config_store_list.add_argument('identity', nargs='?',
help='VIP IDENTITY of the store to list')
config_store_list.set_defaults(func=list_store)
config_store_get = add_parser("get",
help="get the contents of a configuration",
subparser=config_store_subparsers)
config_store_get.add_argument('identity',
help='VIP IDENTITY of the store')
config_store_get.add_argument('name',
help='name used to reference the configuration by in the store')
config_store_get.add_argument('--raw', action="store_true",
help='get the configuration as raw data')
config_store_get.set_defaults(func=get_config)
shutdown = add_parser('shutdown',
help='stop all agents')
shutdown.add_argument('--platform', action='store_true',
help='also stop the platform process')
shutdown.set_defaults(func=shutdown_agents, platform=False)
send = add_parser('send',
help='send agent and start on a remote platform')
send.add_argument('wheel', nargs='+', help='agent package to send')
send.set_defaults(func=send_agent)
stats = add_parser('stats',
help='manage router message statistics tracking')
op = stats.add_argument(
'op', choices=['status', 'enable', 'disable', 'dump', 'pprint'],
nargs='?')
stats.set_defaults(func=do_stats, op='status')
# ==============================================================================
global message_bus, rmq_mgmt
if message_bus == 'rmq':
rmq_mgmt = RabbitMQMgmt()
# Add commands
rabbitmq_cmds = add_parser("rabbitmq", help="manage rabbitmq")
rabbitmq_subparsers = rabbitmq_cmds.add_subparsers(title='subcommands',
metavar='',
dest='store_commands')
rabbitmq_add_vhost = add_parser('add-vhost', help='add a new virtual host',
subparser=rabbitmq_subparsers)
rabbitmq_add_vhost.add_argument('vhost', help='Virtual host')
rabbitmq_add_vhost.set_defaults(func=add_vhost)
rabbitmq_add_user = add_parser('add-user',
                                       help='Add a new user. User will have admin privileges, i.e. '
                                       'configure, read and write',
subparser=rabbitmq_subparsers)
rabbitmq_add_user.add_argument('user', help='user id')
rabbitmq_add_user.add_argument('pwd', help='password')
rabbitmq_add_user.set_defaults(func=add_user)
rabbitmq_add_exchange = add_parser('add-exchange',
help='add a new exchange',
subparser=rabbitmq_subparsers)
rabbitmq_add_exchange.add_argument('name', help='Name of the exchange')
rabbitmq_add_exchange.add_argument('type', help='Type of the exchange - fanout/direct/topic')
rabbitmq_add_exchange.set_defaults(func=add_exchange)
rabbitmq_add_queue = add_parser('add-queue',
help='add a new queue',
subparser=rabbitmq_subparsers)
rabbitmq_add_queue.add_argument('name', help='Name of the queue')
rabbitmq_add_queue.set_defaults(func=add_queue)
# =======================================================================
# List commands
rabbitmq_list_vhosts = add_parser('list-vhosts', help='List virtual hosts',
subparser=rabbitmq_subparsers)
rabbitmq_list_vhosts.set_defaults(func=list_vhosts)
rabbitmq_list_users = add_parser('list-users', help='List users',
subparser=rabbitmq_subparsers)
rabbitmq_list_users.set_defaults(func=list_users)
        rabbitmq_list_user_properties = add_parser('list-user-properties', help='List user properties',
subparser=rabbitmq_subparsers)
rabbitmq_list_user_properties.add_argument('user', help='RabbitMQ user id')
rabbitmq_list_user_properties.set_defaults(func=list_user_properties)
        rabbitmq_list_exchanges = add_parser('list-exchanges', help='List exchanges',
subparser=rabbitmq_subparsers)
rabbitmq_list_exchanges.set_defaults(func=list_exchanges)
rabbitmq_list_exchanges_props = add_parser('list-exchange-properties', help='list exchanges with properties',
subparser=rabbitmq_subparsers)
rabbitmq_list_exchanges_props.set_defaults(func=list_exchanges_with_properties)
rabbitmq_list_queues = add_parser('list-queues', help='list all queues',
subparser=rabbitmq_subparsers)
rabbitmq_list_queues.set_defaults(func=list_queues)
rabbitmq_list_queues_props = add_parser('list-queue-properties', help='list queues with properties',
subparser=rabbitmq_subparsers)
rabbitmq_list_queues_props.set_defaults(func=list_queues_with_properties)
rabbitmq_list_bindings = add_parser('list-bindings', help='list all bindings with exchange',
subparser=rabbitmq_subparsers)
rabbitmq_list_bindings.add_argument('exchange', help='Source exchange')
rabbitmq_list_bindings.set_defaults(func=list_bindings)
rabbitmq_list_fed_parameters = add_parser('list-federation-parameters', help='list all federation parameters',
subparser=rabbitmq_subparsers)
rabbitmq_list_fed_parameters.set_defaults(func=list_fed_parameters)
rabbitmq_list_shovel_parameters = add_parser('list-shovel-parameters', help='list all shovel parameters',
subparser=rabbitmq_subparsers)
rabbitmq_list_shovel_parameters.set_defaults(func=list_shovel_parameters)
rabbitmq_list_policies = add_parser('list-policies', help='list all policies',
subparser=rabbitmq_subparsers)
rabbitmq_list_policies.set_defaults(func=list_policies)
# ==========================================================================================
# Remove commands
rabbitmq_remove_vhosts = add_parser('remove-vhosts', help='Remove virtual host/s',
subparser=rabbitmq_subparsers)
rabbitmq_remove_vhosts.add_argument('vhost', nargs='+', help='Virtual host')
rabbitmq_remove_vhosts.set_defaults(func=remove_vhosts)
        rabbitmq_remove_users = add_parser('remove-users', help='Remove user/s',
subparser=rabbitmq_subparsers)
        rabbitmq_remove_users.add_argument('user', nargs='+', help='user id')
rabbitmq_remove_users.set_defaults(func=remove_users)
rabbitmq_remove_exchanges = add_parser('remove-exchanges', help='Remove exchange/s',
subparser=rabbitmq_subparsers)
        rabbitmq_remove_exchanges.add_argument('exchanges', nargs='+', help='exchange name/s')
rabbitmq_remove_exchanges.set_defaults(func=remove_exchanges)
rabbitmq_remove_queues = add_parser('remove-queues', help='Remove queue/s',
subparser=rabbitmq_subparsers)
rabbitmq_remove_queues.add_argument('queues', nargs='+', help='Queue')
rabbitmq_remove_queues.set_defaults(func=remove_queues)
rabbitmq_remove_fed_parameters = add_parser('remove-federation-parameters',
help='Remove federation parameter',
subparser=rabbitmq_subparsers)
rabbitmq_remove_fed_parameters.add_argument('parameters', nargs='+', help='parameter name/s')
rabbitmq_remove_fed_parameters.set_defaults(func=remove_fed_parameters)
rabbitmq_remove_shovel_parameters = add_parser('remove-shovel-parameters',
help='Remove shovel parameter',
subparser=rabbitmq_subparsers)
rabbitmq_remove_shovel_parameters.add_argument('parameters', nargs='+', help='parameter name/s')
rabbitmq_remove_shovel_parameters.set_defaults(func=remove_shovel_parameters)
rabbitmq_remove_policies = add_parser('remove-policies', help='Remove policy',
subparser=rabbitmq_subparsers)
rabbitmq_remove_policies.add_argument('policies', nargs='+', help='policy name/s')
rabbitmq_remove_policies.set_defaults(func=remove_policies)
# ===============================================================================================
if HAVE_RESTRICTED:
cgroup = add_parser('create-cgroups',
help='setup VOLTTRON control group for restricted execution')
cgroup.add_argument('-u', '--user', metavar='USER',
help='owning user name or ID')
cgroup.add_argument('-g', '--group', metavar='GROUP',
help='owning group name or ID')
cgroup.set_defaults(func=create_cgroups, user=None, group=None)
# Parse and expand options
args = argv[1:]
# TODO: for auth some of the commands will work when volttron is down and
# some will error (example vctl auth serverkey). Do check inside auth
# function
# Below vctl commands can work even when volttron is not up. For others
# volttron need to be up.
if len(args) > 0:
if args[0] not in ('list', 'tag', 'auth', 'rabbitmq'):
# check pid file
if not utils.is_volttron_running(volttron_home):
_stderr.write("VOLTTRON is not running. This command "
"requires VOLTTRON platform to be running\n")
return 10
conf = os.path.join(volttron_home, 'config')
if os.path.exists(conf) and 'SKIP_VOLTTRON_CONFIG' not in os.environ:
args = ['--config', conf] + args
opts = parser.parse_args(args)
if opts.log:
opts.log = config.expandall(opts.log)
if opts.log_config:
opts.log_config = config.expandall(opts.log_config)
opts.vip_address = config.expandall(opts.vip_address)
if getattr(opts, 'show_config', False):
        for name, value in sorted(vars(opts).items()):
print(name, repr(value))
return
# Configure logging
level = max(1, opts.verboseness)
if opts.log is None:
log_to_file(sys.stderr, level)
elif opts.log == '-':
log_to_file(sys.stdout, level)
elif opts.log:
log_to_file(
opts.log, level,
handler_class=logging.handlers.WatchedFileHandler)
else:
log_to_file(None, 100, handler_class=lambda x: logging.NullHandler())
if opts.log_config:
logging.config.fileConfig(opts.log_config)
opts.aip = aipmod.AIPplatform(opts)
opts.aip.setup()
opts.connection = ControlConnection(opts.vip_address,
**get_keys(opts))
try:
with gevent.Timeout(opts.timeout):
return opts.func(opts)
except gevent.Timeout:
_stderr.write('{}: operation timed out\n'.format(opts.command))
return 75
except RemoteError as exc:
print_tb = exc.print_tb
error = exc.message
except Exception as exc:
print_tb = traceback.print_exc
error = str(exc)
else:
return 0
if opts.debug:
print_tb()
_stderr.write('{}: error: {}\n'.format(opts.command, error))
return 20
def _main():
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
if __name__ == '__main__':
_main()
| []
| []
| [
"VOLTTRON_HOME",
"EDITOR"
]
| [] | ["VOLTTRON_HOME", "EDITOR"] | python | 2 | 0 | |
01-Authorization/auth0authorization/views.py | import os
import jwt
import json
from functools import wraps
from django.http import JsonResponse
from rest_framework.decorators import api_view
from six.moves.urllib import request as req
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
# Create your views here.
def get_token_auth_header(request):
"""Obtains the access token from the Authorization Header
"""
auth = request.META.get("HTTP_AUTHORIZATION", None)
parts = auth.split()
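    # Assumes a well-formed "Bearer <token>" header; no validation of the scheme or number of parts is done here.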
token = parts[1]
return token
def requires_scope(required_scope):
"""Determines if the required scope is present in the access token
Args:
required_scope (str): The scope required to access the resource
"""
def require_scope(f):
@wraps(f)
def decorated(*args, **kwargs):
token = get_token_auth_header(args[0])
AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
API_IDENTIFIER = os.environ.get('API_IDENTIFIER')
jsonurl = req.urlopen('https://' + AUTH0_DOMAIN + '/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
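            # Build an X.509 certificate from the first key's x5c entry in the JWKS and use its public key to verify the RS256 token signature.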
cert = '-----BEGIN CERTIFICATE-----\n' + jwks['keys'][0]['x5c'][0] + '\n-----END CERTIFICATE-----'
certificate = load_pem_x509_certificate(cert.encode('utf-8'), default_backend())
public_key = certificate.public_key()
decoded = jwt.decode(token, public_key, audience=API_IDENTIFIER, algorithms=['RS256'])
if decoded.get("scope"):
token_scopes = decoded["scope"].split()
for token_scope in token_scopes:
if token_scope == required_scope:
return f(*args, **kwargs)
response = JsonResponse({'message': 'You don\'t have access to this resource'})
response.status_code = 403
return response
return decorated
return require_scope
def public(request):
return JsonResponse({'message': 'Hello from a public endpoint! You don\'t need to be authenticated to see this.'})
@api_view(['GET'])
def private(request):
return JsonResponse({'message': 'Hello from a private endpoint! You need to be authenticated to see this.'})
@api_view(['GET'])
@requires_scope('read:messages')
def private_scoped(request):
return JsonResponse({'message': 'Hello from a private endpoint! You need to be authenticated and have a scope of read:messages to see this.'})
| []
| []
| [
"AUTH0_DOMAIN",
"API_IDENTIFIER"
]
| [] | ["AUTH0_DOMAIN", "API_IDENTIFIER"] | python | 2 | 0 | |
main.py | ###############################################################################
#
# Author: Gregory A. Bauer, Jasper Wong, Amy Robertson
# Email: [email protected]
# Course: CS467_400_W2021
#
# Description:
# Launches web application by serving up landing page
#
# Note:
# Main should be clear of excessive routes. All other routes have been
# modularized and placed in separate python modules.
#
# References:
# https://stackoverflow.com/questions/53176162/google-oauth-scope-changed-during-authentication-but-scope-is-same
# https://stackoverflow.com/questions/22669528/securely-storing-environment-variables-in-gae-with-app-yaml?rq=1
# https://stackoverflow.com/questions/18709213/flask-session-not-persisting
###############################################################################
from flask import Flask, render_template, session, redirect
import OAuth
import pets
import users
import admin
import news
import adopt
import applications
import constants
from repository import PetDsRepository
import os
# This disables the requirement to use HTTPS so that you can test locally.
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# Disables scope change warning
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
app = Flask(__name__)
app.register_blueprint(OAuth.bp)
app.register_blueprint(users.bp)
app.register_blueprint(pets.bp)
app.register_blueprint(admin.bp)
app.register_blueprint(news.bp)
app.register_blueprint(adopt.bp)
app.register_blueprint(applications.bp)
# app.secret_key = os.urandom(24)
app.secret_key = constants.SECRET_KEY
###############################################################################
# Landing page with google login
@app.route('/')
def index():
status = PetDsRepository.getLatestStatus()
return render_template('index.html',
status=status,
public_url=constants.BUCKET)
###############################################################################
@app.route('/logout', methods=['GET'])
def logout():
session.clear()
return redirect('/')
###############################################################################
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| []
| []
| [
"OAUTHLIB_RELAX_TOKEN_SCOPE",
"OAUTHLIB_INSECURE_TRANSPORT"
]
| [] | ["OAUTHLIB_RELAX_TOKEN_SCOPE", "OAUTHLIB_INSECURE_TRANSPORT"] | python | 2 | 0 | |
pkg/args/args.go | /*
* Copyright 2019 gosoon.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package args has common command-line flags for generation programs.
package args
import (
"bytes"
goflag "flag"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/parser"
"k8s.io/gengo/types"
"github.com/spf13/pflag"
)
// Default returns a defaulted GeneratorArgs. You may change the defaults
// before calling AddFlags.
func Default() *GeneratorArgs {
return &GeneratorArgs{
OutputBase: DefaultSourceTree(),
GoHeaderFilePath: filepath.Join(DefaultSourceTree(),
"github.com/gosoon/code-generator/pkg/boilerplate/boilerplate.go.txt"),
GeneratedBuildTag: "ignore_autogenerated",
//GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.",
defaultCommandLineFlags: true,
}
}
// GeneratorArgs has arguments that are passed to generators.
type GeneratorArgs struct {
// Which directories to parse.
InputDirs []string
// Source tree to write results to.
OutputBase string
// Package path within the source tree.
OutputPackagePath string
// Output file name.
OutputFileBaseName string
// Where to get copyright header text.
GoHeaderFilePath string
// If GeneratedByCommentTemplate is set, generate a "Code generated by" comment
	// below the boilerplate, of the format defined by this string.
// Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator.
GeneratedByCommentTemplate string
// If true, only verify, don't write anything.
VerifyOnly bool
// GeneratedBuildTag is the tag used to identify code generated by execution
// of this type. Each generator should use a different tag, and different
// groups of generators (external API that depends on Kube generations) should
// keep tags distinct as well.
GeneratedBuildTag string
// Any custom arguments go here
CustomArgs interface{}
// Whether to use default command line flags
defaultCommandLineFlags bool
}
// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing.
func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs {
g.defaultCommandLineFlags = false
return g
}
func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) {
fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.")
fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.")
fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.")
fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.")
fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.")
fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.")
fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.")
}
// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file.
func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) {
b, err := ioutil.ReadFile(g.GoHeaderFilePath)
if err != nil {
return nil, err
}
b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().Year())), -1)
if g.GeneratedByCommentTemplate != "" {
if len(b) != 0 {
b = append(b, byte('\n'))
}
generatorName := path.Base(os.Args[0])
generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1)
s := fmt.Sprintf("%s\n\n", generatedByComment)
b = append(b, []byte(s)...)
}
return b, nil
}
// NewBuilder makes a new parser.Builder and populates it with the input
// directories.
func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) {
b := parser.New()
// Ignore all auto-generated files.
b.AddBuildTags(g.GeneratedBuildTag)
for _, d := range g.InputDirs {
var err error
if strings.HasSuffix(d, "/...") {
err = b.AddDirRecursive(strings.TrimSuffix(d, "/..."))
} else {
err = b.AddDir(d)
}
if err != nil {
return nil, fmt.Errorf("unable to add directory %q: %v", d, err)
}
}
return b, nil
}
// InputIncludes returns true if the given package is a (sub) package of one of
// the InputDirs.
func (g *GeneratorArgs) InputIncludes(p *types.Package) bool {
for _, dir := range g.InputDirs {
d := dir
if strings.HasSuffix(d, "...") {
d = strings.TrimSuffix(d, "...")
}
if strings.HasPrefix(p.Path, d) {
return true
}
}
return false
}
// DefaultSourceTree returns the /src directory of the first entry in $GOPATH.
// If $GOPATH is empty, it returns "./". Useful as a default output location.
func DefaultSourceTree() string {
paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator))
if len(paths) > 0 && len(paths[0]) > 0 {
return filepath.Join(paths[0], "src")
}
return "./"
}
// Execute implements main().
// If you don't need any non-default behavior, use as:
// args.Default().Execute(...)
func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error {
if g.defaultCommandLineFlags {
g.AddFlags(pflag.CommandLine)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
}
b, err := g.NewBuilder()
if err != nil {
return fmt.Errorf("Failed making a parser: %v", err)
}
c, err := generator.NewContext(b, nameSystems, defaultSystem)
if err != nil {
return fmt.Errorf("Failed making a context: %v", err)
}
c.Verify = g.VerifyOnly
packages := pkgs(c, g)
if err := c.ExecutePackages(g.OutputBase, packages); err != nil {
return fmt.Errorf("Failed executing generator: %v", err)
}
return nil
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
vendor/github.com/hashicorp/packer/packer/ui.go | package packer
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"runtime"
"strings"
"syscall"
"time"
"unicode"
getter "github.com/hashicorp/go-getter/v2"
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)
var ErrInterrupted = errors.New("interrupted")
type UiColor uint
const (
UiColorRed UiColor = 31
UiColorGreen = 32
UiColorYellow = 33
UiColorBlue = 34
UiColorMagenta = 35
UiColorCyan = 36
)
// ColoredUi is a UI that is colored using terminal colors.
type ColoredUi struct {
Color UiColor
ErrorColor UiColor
Ui packersdk.Ui
PB getter.ProgressTracker
}
var _ packersdk.Ui = new(ColoredUi)
func (u *ColoredUi) Ask(query string) (string, error) {
return u.Ui.Ask(u.colorize(query, u.Color, true))
}
func (u *ColoredUi) Say(message string) {
u.Ui.Say(u.colorize(message, u.Color, true))
}
func (u *ColoredUi) Message(message string) {
u.Ui.Message(u.colorize(message, u.Color, false))
}
func (u *ColoredUi) Error(message string) {
color := u.ErrorColor
if color == 0 {
color = UiColorRed
}
u.Ui.Error(u.colorize(message, color, true))
}
func (u *ColoredUi) Machine(t string, args ...string) {
// Don't colorize machine-readable output
u.Ui.Machine(t, args...)
}
func (u *ColoredUi) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) io.ReadCloser {
return u.Ui.TrackProgress(u.colorize(src, u.Color, false), currentSize, totalSize, stream)
}
func (u *ColoredUi) colorize(message string, color UiColor, bold bool) string {
if !u.supportsColors() {
return message
}
attr := 0
if bold {
attr = 1
}
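	// Wrap the message in an ANSI escape sequence: attribute (0 normal, 1 bold) and color code, then reset formatting afterwards.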
return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color, message)
}
func (u *ColoredUi) supportsColors() bool {
// Never use colors if we have this environmental variable
if os.Getenv("PACKER_NO_COLOR") != "" {
return false
}
// For now, on non-Windows machine, just assume it does
if runtime.GOOS != "windows" {
return true
}
// On Windows, if we appear to be in Cygwin, then it does
cygwin := os.Getenv("CYGWIN") != "" ||
os.Getenv("OSTYPE") == "cygwin" ||
os.Getenv("TERM") == "cygwin"
return cygwin
}
// TargetedUI is a UI that wraps another UI implementation and modifies
// the output to indicate a specific target. Specifically, all Say output
// is prefixed with the target name. Message output is not prefixed but
// is offset by the length of the target so that output is lined up properly
// with Say output. Machine-readable output has the proper target set.
type TargetedUI struct {
Target string
Ui packersdk.Ui
}
var _ packersdk.Ui = new(TargetedUI)
func (u *TargetedUI) Ask(query string) (string, error) {
return u.Ui.Ask(u.prefixLines(true, query))
}
func (u *TargetedUI) Say(message string) {
u.Ui.Say(u.prefixLines(true, message))
}
func (u *TargetedUI) Message(message string) {
u.Ui.Message(u.prefixLines(false, message))
}
func (u *TargetedUI) Error(message string) {
u.Ui.Error(u.prefixLines(true, message))
}
func (u *TargetedUI) Machine(t string, args ...string) {
// Prefix in the target, then pass through
u.Ui.Machine(fmt.Sprintf("%s,%s", u.Target, t), args...)
}
func (u *TargetedUI) prefixLines(arrow bool, message string) string {
arrowText := "==>"
if !arrow {
arrowText = strings.Repeat(" ", len(arrowText))
}
var result bytes.Buffer
for _, line := range strings.Split(message, "\n") {
result.WriteString(fmt.Sprintf("%s %s: %s\n", arrowText, u.Target, line))
}
return strings.TrimRightFunc(result.String(), unicode.IsSpace)
}
func (u *TargetedUI) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) io.ReadCloser {
return u.Ui.TrackProgress(u.prefixLines(false, src), currentSize, totalSize, stream)
}
// MachineReadableUi is a UI that only outputs machine-readable output
// to the given Writer.
type MachineReadableUi struct {
Writer io.Writer
PB packersdk.NoopProgressTracker
}
var _ packersdk.Ui = new(MachineReadableUi)
func (u *MachineReadableUi) Ask(query string) (string, error) {
return "", errors.New("machine-readable UI can't ask")
}
func (u *MachineReadableUi) Say(message string) {
u.Machine("ui", "say", message)
}
func (u *MachineReadableUi) Message(message string) {
u.Machine("ui", "message", message)
}
func (u *MachineReadableUi) Error(message string) {
u.Machine("ui", "error", message)
}
func (u *MachineReadableUi) Machine(category string, args ...string) {
now := time.Now().UTC()
// Determine if we have a target, and set it
target := ""
commaIdx := strings.Index(category, ",")
if commaIdx > -1 {
target = category[0:commaIdx]
category = category[commaIdx+1:]
}
// Prepare the args
for i, v := range args {
// Use packersdk.LogSecretFilter to scrub out sensitive variables
args[i] = packersdk.LogSecretFilter.FilterString(args[i])
args[i] = strings.Replace(v, ",", "%!(PACKER_COMMA)", -1)
args[i] = strings.Replace(args[i], "\r", "\\r", -1)
args[i] = strings.Replace(args[i], "\n", "\\n", -1)
}
argsString := strings.Join(args, ",")
_, err := fmt.Fprintf(u.Writer, "%d,%s,%s,%s\n", now.Unix(), target, category, argsString)
if err != nil {
if err == syscall.EPIPE || strings.Contains(err.Error(), "broken pipe") {
// Ignore epipe errors because that just means that the file
// is probably closed or going to /dev/null or something.
} else {
panic(err)
}
}
log.Printf("%d,%s,%s,%s\n", now.Unix(), target, category, argsString)
}
func (u *MachineReadableUi) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) {
return u.PB.TrackProgress(src, currentSize, totalSize, stream)
}
// TimestampedUi is a UI that wraps another UI implementation and
// prefixes each message with an RFC3339 timestamp
type TimestampedUi struct {
Ui packersdk.Ui
PB getter.ProgressTracker
}
var _ packersdk.Ui = new(TimestampedUi)
func (u *TimestampedUi) Ask(query string) (string, error) {
return u.Ui.Ask(query)
}
func (u *TimestampedUi) Say(message string) {
u.Ui.Say(u.timestampLine(message))
}
func (u *TimestampedUi) Message(message string) {
u.Ui.Message(u.timestampLine(message))
}
func (u *TimestampedUi) Error(message string) {
u.Ui.Error(u.timestampLine(message))
}
func (u *TimestampedUi) Machine(message string, args ...string) {
u.Ui.Machine(message, args...)
}
func (u *TimestampedUi) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) {
return u.Ui.TrackProgress(src, currentSize, totalSize, stream)
}
func (u *TimestampedUi) timestampLine(string string) string {
return fmt.Sprintf("%v: %v", time.Now().Format(time.RFC3339), string)
}
| [
"\"PACKER_NO_COLOR\"",
"\"CYGWIN\"",
"\"OSTYPE\"",
"\"TERM\""
]
| []
| [
"PACKER_NO_COLOR",
"TERM",
"CYGWIN",
"OSTYPE"
]
| [] | ["PACKER_NO_COLOR", "TERM", "CYGWIN", "OSTYPE"] | go | 4 | 0 | |
pkg/runner/local_runner_test.go | package runner_test
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"github.com/buildbarn/bb-remote-execution/internal/mock"
runner_pb "github.com/buildbarn/bb-remote-execution/pkg/proto/runner"
"github.com/buildbarn/bb-remote-execution/pkg/runner"
"github.com/buildbarn/bb-storage/pkg/filesystem"
"github.com/buildbarn/bb-storage/pkg/filesystem/path"
"github.com/buildbarn/bb-storage/pkg/testutil"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestLocalRunner(t *testing.T) {
ctrl := gomock.NewController(t)
buildDirectoryPath := t.TempDir()
buildDirectory, err := filesystem.NewLocalDirectory(buildDirectoryPath)
require.NoError(t, err)
defer buildDirectory.Close()
buildDirectoryPathBuilder, scopeWalker := path.EmptyBuilder.Join(path.VoidScopeWalker)
require.NoError(t, path.Resolve(buildDirectoryPath, scopeWalker))
var cmdPath string
var getEnvCommand []string
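	// Use cmd.exe's "set" builtin to dump the environment on Windows; elsewhere fall back to /usr/bin/env.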
if runtime.GOOS == "windows" {
cmdPath = filepath.Join(os.Getenv("SYSTEMROOT"), "system32\\cmd.exe")
getEnvCommand = []string{cmdPath, "/d", "/c", "set"}
} else {
getEnvCommand = []string{"/usr/bin/env"}
}
t.Run("EmptyEnvironment", func(t *testing.T) {
if runtime.GOOS == "windows" {
return
}
testPath := filepath.Join(buildDirectoryPath, "EmptyEnvironment")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// Running a command without specifying any environment
// variables should cause the process to be executed in
// an empty environment. It should not inherit the
// environment of the runner.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: getEnvCommand,
StdoutPath: "EmptyEnvironment/stdout",
StderrPath: "EmptyEnvironment/stderr",
InputRootDirectory: "EmptyEnvironment/root",
TemporaryDirectory: "EmptyEnvironment/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.Empty(t, stdout)
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("NonEmptyEnvironment", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "NonEmptyEnvironment")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
tmpPath := filepath.Join(testPath, "tmp")
require.NoError(t, os.Mkdir(tmpPath, 0o777))
// The environment variables provided in the RunRequest
// should be respected. If automatic injection of TMPDIR
// is enabled, that variable should also be added.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), true)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: getEnvCommand,
EnvironmentVariables: map[string]string{
"FOO": "bar",
"BAZ": "xyzzy",
},
StdoutPath: "NonEmptyEnvironment/stdout",
StderrPath: "NonEmptyEnvironment/stderr",
InputRootDirectory: "NonEmptyEnvironment/root",
TemporaryDirectory: "NonEmptyEnvironment/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
if runtime.GOOS == "windows" {
require.Subset(t, strings.Fields(string(stdout)), []string{
"FOO=bar",
"BAZ=xyzzy",
"TMP=" + tmpPath,
"TEMP=" + tmpPath,
})
} else {
require.ElementsMatch(t, []string{
"FOO=bar",
"BAZ=xyzzy",
"TMPDIR=" + tmpPath,
}, strings.Fields(string(stdout)))
}
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("OverridingTmpdir", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "OverridingTmpdir")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
tmpPath := filepath.Join(testPath, "tmp")
require.NoError(t, os.Mkdir(tmpPath, 0o777))
var envMap map[string]string
if runtime.GOOS == "windows" {
envMap = map[string]string{
"TMP": "\\somewhere\\else",
"TEMP": "\\somewhere\\else",
}
} else {
envMap = map[string]string{
"TMPDIR": "/somewhere/else",
}
}
// Automatic injection of TMPDIR should have no effect
// if the command to be run provides its own TMPDIR.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), true)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: getEnvCommand,
EnvironmentVariables: envMap,
StdoutPath: "OverridingTmpdir/stdout",
StderrPath: "OverridingTmpdir/stderr",
InputRootDirectory: "OverridingTmpdir/root",
TemporaryDirectory: "OverridingTmpdir/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(0), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
if runtime.GOOS == "windows" {
require.Subset(t, strings.Fields(string(stdout)), []string{
"TMP=\\somewhere\\else",
"TEMP=\\somewhere\\else",
})
} else {
require.Equal(t, "TMPDIR=/somewhere/else\n", string(stdout))
}
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("NonZeroExitCode", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "NonZeroExitCode")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// Non-zero exit codes should be captured in the
// RunResponse. POSIX 2008 and later added support for
// 32-bit signed exit codes. Most implementations still
// truncate the exit code to 8 bits.
var exit255Command []string
if runtime.GOOS == "windows" {
exit255Command = []string{cmdPath, "/d", "/c", "exit 255"}
} else {
exit255Command = []string{"/bin/sh", "-c", "exit 255"}
}
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
response, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: exit255Command,
StdoutPath: "NonZeroExitCode/stdout",
StderrPath: "NonZeroExitCode/stderr",
InputRootDirectory: "NonZeroExitCode/root",
TemporaryDirectory: "NonZeroExitCode/tmp",
})
require.NoError(t, err)
require.Equal(t, int32(255), response.ExitCode)
stdout, err := ioutil.ReadFile(filepath.Join(testPath, "stdout"))
require.NoError(t, err)
require.Empty(t, stdout)
stderr, err := ioutil.ReadFile(filepath.Join(testPath, "stderr"))
require.NoError(t, err)
require.Empty(t, stderr)
})
t.Run("UnknownCommandInPath", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "UnknownCommandInPath")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// If argv[0] consists of a single filename, lookups
// against $PATH need to be performed. If the executable
// can't be found in any of the directories, the action
// should fail with a non-retriable error.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"nonexistent_command"},
StdoutPath: "UnknownCommandInPath/stdout",
StderrPath: "UnknownCommandInPath/stderr",
InputRootDirectory: "UnknownCommandInPath/root",
TemporaryDirectory: "UnknownCommandInPath/tmp",
})
testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err)
})
t.Run("UnknownCommandRelative", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "UnknownCommandRelative")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// If argv[0] is not an absolute path, but does consist
// of multiple components, no $PATH lookup is performed.
// If the path does not exist, the action should fail
// with a non-retriable error.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"./nonexistent_command"},
StdoutPath: "UnknownCommandRelative/stdout",
StderrPath: "UnknownCommandRelative/stderr",
InputRootDirectory: "UnknownCommandRelative/root",
TemporaryDirectory: "UnknownCommandRelative/tmp",
})
testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err)
})
t.Run("UnknownCommandAbsolute", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "UnknownCommandAbsolute")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// If argv[0] is an absolute path that does not exist,
// we should also return a non-retriable error.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/nonexistent_command"},
StdoutPath: "UnknownCommandAbsolute/stdout",
StderrPath: "UnknownCommandAbsolute/stderr",
InputRootDirectory: "UnknownCommandAbsolute/root",
TemporaryDirectory: "UnknownCommandAbsolute/tmp",
})
testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err)
})
t.Run("ExecFormatError", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "ExecFormatError")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
require.NoError(t, os.WriteFile(filepath.Join(testPath, "root", "not_a.binary"), []byte{
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
}, 0o777))
// If argv[0] is a binary that cannot be executed we should also return
// a non-retriable error.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"./not_a.binary"},
StdoutPath: "ExecFormatError/stdout",
StderrPath: "ExecFormatError/stderr",
InputRootDirectory: "ExecFormatError/root",
TemporaryDirectory: "ExecFormatError/tmp",
})
testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err)
})
t.Run("UnknownCommandDirectory", func(t *testing.T) {
testPath := filepath.Join(buildDirectoryPath, "UnknownCommandDirectory")
require.NoError(t, os.Mkdir(testPath, 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "root"), 0o777))
require.NoError(t, os.Mkdir(filepath.Join(testPath, "tmp"), 0o777))
// If argv[0] refers to a directory, we should also
// return a non-retriable error.
runner := runner.NewLocalRunner(buildDirectory, buildDirectoryPathBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: []string{"/"},
StdoutPath: "UnknownCommandDirectory/stdout",
StderrPath: "UnknownCommandDirectory/stderr",
InputRootDirectory: "UnknownCommandDirectory/root",
TemporaryDirectory: "UnknownCommandDirectory/tmp",
})
testutil.RequirePrefixedStatus(t, status.Error(codes.InvalidArgument, "Failed to start process: "), err)
})
t.Run("BuildDirectoryEscape", func(t *testing.T) {
buildDirectory := mock.NewMockDirectory(ctrl)
helloDirectory := mock.NewMockDirectoryCloser(ctrl)
buildDirectory.EXPECT().EnterDirectory(path.MustNewComponent("hello")).Return(helloDirectory, nil)
helloDirectory.EXPECT().Close()
// The runner process may need to run with elevated
// privileges. It shouldn't be possible to trick the
// runner into opening files outside the build
// directory.
runner := runner.NewLocalRunner(buildDirectory, &path.EmptyBuilder, runner.NewPlainCommandCreator(&syscall.SysProcAttr{}), false)
_, err := runner.Run(context.Background(), &runner_pb.RunRequest{
Arguments: getEnvCommand,
StdoutPath: "hello/../../../../../../etc/passwd",
StderrPath: "stderr",
InputRootDirectory: ".",
TemporaryDirectory: ".",
})
		require.Equal(
			t,
			status.Error(codes.InvalidArgument, "Failed to open stdout path \"hello/../../../../../../etc/passwd\": Path resolves to a location outside the build directory"),
			err)
})
// TODO: Improve testing coverage of LocalRunner.
}
| [
"\"SYSTEMROOT\""
]
| []
| [
"SYSTEMROOT"
]
| [] | ["SYSTEMROOT"] | go | 1 | 0 | |
Solar/wsgi.py | """
WSGI config for Solar project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Solar.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
backend/black_disk_31503/wsgi.py | """
WSGI config for black_disk_31503 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'black_disk_31503.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
kubernetes-client/src/main/java/io/fabric8/kubernetes/client/Config.java | /**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.kubernetes.client;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.fabric8.kubernetes.api.model.ConfigBuilder;
import okhttp3.TlsVersion;
import io.fabric8.kubernetes.api.model.AuthInfo;
import io.fabric8.kubernetes.api.model.Cluster;
import io.fabric8.kubernetes.api.model.Context;
import io.fabric8.kubernetes.client.internal.KubeConfigUtils;
import io.fabric8.kubernetes.client.internal.SSLUtils;
import io.fabric8.kubernetes.client.utils.IOHelpers;
import io.fabric8.kubernetes.client.utils.Serialization;
import io.fabric8.kubernetes.client.utils.Utils;
import io.sundr.builder.annotations.Buildable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static okhttp3.TlsVersion.TLS_1_2;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true, allowGetters = true, allowSetters = true)
public class Config {
private static final Logger LOGGER = LoggerFactory.getLogger(Config.class);
public static final String KUBERNETES_MASTER_SYSTEM_PROPERTY = "kubernetes.master";
public static final String KUBERNETES_API_VERSION_SYSTEM_PROPERTY = "kubernetes.api.version";
public static final String KUBERNETES_TRUST_CERT_SYSTEM_PROPERTY = "kubernetes.trust.certificates";
public static final String KUBERNETES_DISABLE_HOSTNAME_VERIFICATION_SYSTEM_PROPERTY = "kubernetes.disable.hostname.verification";
public static final String KUBERNETES_CA_CERTIFICATE_FILE_SYSTEM_PROPERTY = "kubernetes.certs.ca.file";
public static final String KUBERNETES_CA_CERTIFICATE_DATA_SYSTEM_PROPERTY = "kubernetes.certs.ca.data";
public static final String KUBERNETES_CLIENT_CERTIFICATE_FILE_SYSTEM_PROPERTY = "kubernetes.certs.client.file";
public static final String KUBERNETES_CLIENT_CERTIFICATE_DATA_SYSTEM_PROPERTY = "kubernetes.certs.client.data";
public static final String KUBERNETES_CLIENT_KEY_FILE_SYSTEM_PROPERTY = "kubernetes.certs.client.key.file";
public static final String KUBERNETES_CLIENT_KEY_DATA_SYSTEM_PROPERTY = "kubernetes.certs.client.key.data";
public static final String KUBERNETES_CLIENT_KEY_ALGO_SYSTEM_PROPERTY = "kubernetes.certs.client.key.algo";
public static final String KUBERNETES_CLIENT_KEY_PASSPHRASE_SYSTEM_PROPERTY = "kubernetes.certs.client.key.passphrase";
public static final String KUBERNETES_AUTH_BASIC_USERNAME_SYSTEM_PROPERTY = "kubernetes.auth.basic.username";
public static final String KUBERNETES_AUTH_BASIC_PASSWORD_SYSTEM_PROPERTY = "kubernetes.auth.basic.password";
public static final String KUBERNETES_AUTH_TRYKUBECONFIG_SYSTEM_PROPERTY = "kubernetes.auth.tryKubeConfig";
public static final String KUBERNETES_AUTH_TRYSERVICEACCOUNT_SYSTEM_PROPERTY = "kubernetes.auth.tryServiceAccount";
public static final String KUBERNETES_OAUTH_TOKEN_SYSTEM_PROPERTY = "kubernetes.auth.token";
public static final String KUBERNETES_WATCH_RECONNECT_INTERVAL_SYSTEM_PROPERTY = "kubernetes.watch.reconnectInterval";
public static final String KUBERNETES_WATCH_RECONNECT_LIMIT_SYSTEM_PROPERTY = "kubernetes.watch.reconnectLimit";
public static final String KUBERNETES_CONNECTION_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.connection.timeout";
public static final String KUBERNETES_REQUEST_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.request.timeout";
public static final String KUBERNETES_ROLLING_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.rolling.timeout";
public static final String KUBERNETES_LOGGING_INTERVAL_SYSTEM_PROPERTY = "kubernetes.logging.interval";
public static final String KUBERNETES_SCALE_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.scale.timeout";
public static final String KUBERNETES_WEBSOCKET_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.websocket.timeout";
public static final String KUBERNETES_WEBSOCKET_PING_INTERVAL_SYSTEM_PROPERTY = "kubernetes.websocket.ping.interval";
public static final String KUBERNETES_MAX_CONCURRENT_REQUESTS ="kubernetes.max.concurrent.requests";
public static final String KUBERNETES_MAX_CONCURRENT_REQUESTS_PER_HOST ="kubernetes.max.concurrent.requests.per.host";
public static final String KUBERNETES_IMPERSONATE_USERNAME = "kubernetes.impersonate.username";
public static final String KUBERNETES_IMPERSONATE_GROUP = "kubernetes.impersonate.group";
public static final String KUBERNETES_TRUSTSTORE_PASSPHRASE_PROPERTY = "kubernetes.truststore.passphrase";
public static final String KUBERNETES_TRUSTSTORE_FILE_PROPERTY = "kubernetes.truststore.file";
public static final String KUBERNETES_KEYSTORE_PASSPHRASE_PROPERTY = "kubernetes.keystore.passphrase";
public static final String KUBERNETES_KEYSTORE_FILE_PROPERTY = "kubernetes.keystore.file";
public static final String KUBERNETES_TLS_VERSIONS = "kubernetes.tls.versions";
public static final String KUBERNETES_TRYNAMESPACE_PATH_SYSTEM_PROPERTY = "kubernetes.tryNamespacePath";
public static final String KUBERNETES_NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
public static final String KUBERNETES_NAMESPACE_FILE = "kubenamespace";
public static final String KUBERNETES_NAMESPACE_SYSTEM_PROPERTY = "kubernetes.namespace";
public static final String KUBERNETES_KUBECONFIG_FILE = "kubeconfig";
public static final String KUBERNETES_SERVICE_HOST_PROPERTY = "KUBERNETES_SERVICE_HOST";
public static final String KUBERNETES_SERVICE_PORT_PROPERTY = "KUBERNETES_SERVICE_PORT";
public static final String KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token";
public static final String KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt";
public static final String KUBERNETES_HTTP_PROXY = "http.proxy";
public static final String KUBERNETES_HTTPS_PROXY = "https.proxy";
public static final String KUBERNETES_ALL_PROXY = "all.proxy";
public static final String KUBERNETES_NO_PROXY = "no.proxy";
public static final String KUBERNETES_PROXY_USERNAME = "proxy.username";
public static final String KUBERNETES_PROXY_PASSWORD = "proxy.password";
  public static final String KUBERNETES_USER_AGENT = "fabric8-kubernetes-client/" + Version.clientVersion();
public static final String DEFAULT_MASTER_URL = "https://kubernetes.default.svc";
public static final Long DEFAULT_ROLLING_TIMEOUT = 15 * 60 * 1000L;
public static final Long DEFAULT_SCALE_TIMEOUT = 10 * 60 * 1000L;
public static final int DEFAULT_LOGGING_INTERVAL = 20 * 1000;
public static final Long DEFAULT_WEBSOCKET_TIMEOUT = 5 * 1000L;
public static final Long DEFAULT_WEBSOCKET_PING_INTERVAL = 1 * 1000L;
public static final Integer DEFAULT_MAX_CONCURRENT_REQUESTS = 64;
public static final Integer DEFAULT_MAX_CONCURRENT_REQUESTS_PER_HOST = 5;
public static final String HTTP_PROTOCOL_PREFIX = "http://";
public static final String HTTPS_PROTOCOL_PREFIX = "https://";
private static final String ACCESS_TOKEN = "access-token";
private boolean trustCerts;
private boolean disableHostnameVerification;
private String masterUrl = DEFAULT_MASTER_URL;
private String apiVersion = "v1";
private String namespace;
private String caCertFile;
private String caCertData;
private String clientCertFile;
private String clientCertData;
private String clientKeyFile;
private String clientKeyData;
private String clientKeyAlgo = "RSA";
private String clientKeyPassphrase = "changeit";
private String trustStoreFile;
private String trustStorePassphrase;
private String keyStoreFile;
private String keyStorePassphrase;
private RequestConfig requestConfig = new RequestConfig();
/**
* fields not used but needed for builder generation.
*/
private String username;
private String password;
private String oauthToken;
private int watchReconnectInterval = 1000;
private int watchReconnectLimit = -1;
private int connectionTimeout = 10 * 1000;
private int requestTimeout = 10 * 1000;
private long rollingTimeout = DEFAULT_ROLLING_TIMEOUT;
private long scaleTimeout = DEFAULT_SCALE_TIMEOUT;
private int loggingInterval = DEFAULT_LOGGING_INTERVAL;
private long websocketTimeout = DEFAULT_WEBSOCKET_TIMEOUT;
private long websocketPingInterval = DEFAULT_WEBSOCKET_PING_INTERVAL;
private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS;
private int maxConcurrentRequestsPerHost = DEFAULT_MAX_CONCURRENT_REQUESTS_PER_HOST;
private String impersonateUsername;
/**
* @deprecated use impersonateGroups instead
*/
@Deprecated
private String impersonateGroup;
private String[] impersonateGroups;
private Map<String, List<String>> impersonateExtras;
/**
* end of fields not used but needed for builder generation.
*/
private String httpProxy;
private String httpsProxy;
private String proxyUsername;
private String proxyPassword;
private String[] noProxy;
private String userAgent;
private TlsVersion[] tlsVersions = new TlsVersion[]{TLS_1_2};
private Map<Integer, String> errorMessages = new HashMap<>();
//In future releases (2.4.x) the public constructor will be empty.
//The current functionality will be provided by autoConfigure().
//This is a necessary change to allow us distinguish between auto configured values and builder values.
@Deprecated
public Config() {
autoConfigure(this, null);
}
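  // Typical entry point (illustrative sketch only; DefaultKubernetesClient is the conventional
  // fabric8 client implementation and is assumed here, it is not defined in this file):
  //
  //   Config config = Config.autoConfigure(null);                  // null = use current-context
  //   KubernetesClient client = new DefaultKubernetesClient(config);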
/**
* @param context if null will use current-context
*/
public static Config autoConfigure(String context) {
Config config = new Config();
return autoConfigure(config, context);
}
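  // Resolution order implemented below: try the local kubeconfig file first; only if that fails,
  // fall back to the in-cluster service account credentials and the mounted namespace file.
  // System properties and environment variables are applied last and override both sources.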
private static Config autoConfigure(Config config, String context) {
if (!tryKubeConfig(config, context)) {
tryServiceAccount(config);
tryNamespaceFromPath(config);
}
configFromSysPropsOrEnvVars(config);
if (!config.masterUrl.toLowerCase().startsWith(HTTP_PROTOCOL_PREFIX) && !config.masterUrl.toLowerCase().startsWith(HTTPS_PROTOCOL_PREFIX)) {
config.masterUrl = (SSLUtils.isHttpsAvailable(config) ? HTTPS_PROTOCOL_PREFIX : HTTP_PROTOCOL_PREFIX) + config.masterUrl;
}
if (!config.masterUrl.endsWith("/")) {
config.masterUrl = config.masterUrl + "/";
}
return config;
}
@Buildable(builderPackage = "io.fabric8.kubernetes.api.builder", editableEnabled = false)
public Config(String masterUrl, String apiVersion, String namespace, boolean trustCerts, boolean disableHostnameVerification, String caCertFile, String caCertData, String clientCertFile, String clientCertData, String clientKeyFile, String clientKeyData, String clientKeyAlgo, String clientKeyPassphrase, String username, String password, String oauthToken, int watchReconnectInterval, int watchReconnectLimit, int connectionTimeout, int requestTimeout, long rollingTimeout, long scaleTimeout, int loggingInterval, int maxConcurrentRequestsPerHost, String httpProxy, String httpsProxy, String[] noProxy, Map<Integer, String> errorMessages, String userAgent, TlsVersion[] tlsVersions, long websocketTimeout, long websocketPingInterval, String proxyUsername, String proxyPassword, String trustStoreFile, String trustStorePassphrase, String keyStoreFile, String keyStorePassphrase, String impersonateUsername, String[] impersonateGroups, Map<String, List<String>> impersonateExtras) {
this.masterUrl = masterUrl;
this.apiVersion = apiVersion;
this.namespace = namespace;
this.trustCerts = trustCerts;
this.disableHostnameVerification = disableHostnameVerification;
this.caCertFile = caCertFile;
this.caCertData = caCertData;
this.clientCertFile = clientCertFile;
this.clientCertData = clientCertData;
this.clientKeyFile = clientKeyFile;
this.clientKeyData = clientKeyData;
this.clientKeyAlgo = clientKeyAlgo;
this.clientKeyPassphrase = clientKeyPassphrase;
this.requestConfig = new RequestConfig(username, password, oauthToken, watchReconnectLimit, watchReconnectInterval, connectionTimeout, rollingTimeout, requestTimeout, scaleTimeout, loggingInterval, websocketTimeout, websocketPingInterval, maxConcurrentRequests, maxConcurrentRequestsPerHost);
this.requestConfig.setImpersonateUsername(impersonateUsername);
this.requestConfig.setImpersonateGroups(impersonateGroups);
this.requestConfig.setImpersonateExtras(impersonateExtras);
this.httpProxy= httpProxy;
this.httpsProxy= httpsProxy;
this.noProxy= noProxy;
this.proxyUsername = proxyUsername;
this.proxyPassword = proxyPassword;
this.errorMessages = errorMessages;
this.userAgent = userAgent;
this.tlsVersions = tlsVersions;
    if (!this.masterUrl.toLowerCase().startsWith(HTTP_PROTOCOL_PREFIX) && !this.masterUrl.toLowerCase().startsWith(HTTPS_PROTOCOL_PREFIX)) {
this.masterUrl = (SSLUtils.isHttpsAvailable(this) ? HTTPS_PROTOCOL_PREFIX : HTTP_PROTOCOL_PREFIX) + this.masterUrl;
}
if (!this.masterUrl.endsWith("/")) {
this.masterUrl = this.masterUrl + "/";
}
this.trustStoreFile = trustStoreFile;
this.trustStorePassphrase = trustStorePassphrase;
this.keyStoreFile = keyStoreFile;
this.keyStorePassphrase = keyStorePassphrase;
}
public static void configFromSysPropsOrEnvVars(Config config) {
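    // Each setting below can be supplied either as a Java system property (e.g. "kubernetes.master")
    // or as an environment variable; the env-var mapping is handled by Utils.getSystemPropertyOrEnvVar
    // (commonly the property name upper-cased with dots replaced by underscores, e.g.
    // KUBERNETES_MASTER - stated here as an assumption, the mapping itself lives outside this file).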
config.setTrustCerts(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUST_CERT_SYSTEM_PROPERTY, config.isTrustCerts()));
config.setDisableHostnameVerification(Utils.getSystemPropertyOrEnvVar(KUBERNETES_DISABLE_HOSTNAME_VERIFICATION_SYSTEM_PROPERTY, config.isDisableHostnameVerification()));
config.setMasterUrl(Utils.getSystemPropertyOrEnvVar(KUBERNETES_MASTER_SYSTEM_PROPERTY, config.getMasterUrl()));
config.setApiVersion(Utils.getSystemPropertyOrEnvVar(KUBERNETES_API_VERSION_SYSTEM_PROPERTY, config.getApiVersion()));
config.setNamespace(Utils.getSystemPropertyOrEnvVar(KUBERNETES_NAMESPACE_SYSTEM_PROPERTY, config.getNamespace()));
config.setCaCertFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CA_CERTIFICATE_FILE_SYSTEM_PROPERTY, config.getCaCertFile()));
config.setCaCertData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CA_CERTIFICATE_DATA_SYSTEM_PROPERTY, config.getCaCertData()));
config.setClientCertFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_CERTIFICATE_FILE_SYSTEM_PROPERTY, config.getClientCertFile()));
config.setClientCertData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_CERTIFICATE_DATA_SYSTEM_PROPERTY, config.getClientCertData()));
config.setClientKeyFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_FILE_SYSTEM_PROPERTY, config.getClientKeyFile()));
config.setClientKeyData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_DATA_SYSTEM_PROPERTY, config.getClientKeyData()));
config.setClientKeyAlgo(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_ALGO_SYSTEM_PROPERTY, config.getClientKeyAlgo()));
config.setClientKeyPassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_PASSPHRASE_SYSTEM_PROPERTY, new String(config.getClientKeyPassphrase())));
config.setUserAgent(Utils.getSystemPropertyOrEnvVar(KUBERNETES_USER_AGENT, config.getUserAgent()));
config.setTrustStorePassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUSTSTORE_PASSPHRASE_PROPERTY, config.getTrustStorePassphrase()));
config.setTrustStoreFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUSTSTORE_FILE_PROPERTY, config.getTrustStoreFile()));
config.setKeyStorePassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_KEYSTORE_PASSPHRASE_PROPERTY, config.getKeyStorePassphrase()));
config.setKeyStoreFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_KEYSTORE_FILE_PROPERTY, config.getKeyStoreFile()));
config.setOauthToken(Utils.getSystemPropertyOrEnvVar(KUBERNETES_OAUTH_TOKEN_SYSTEM_PROPERTY, config.getOauthToken()));
config.setUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_BASIC_USERNAME_SYSTEM_PROPERTY, config.getUsername()));
config.setPassword(Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_BASIC_PASSWORD_SYSTEM_PROPERTY, config.getPassword()));
config.setImpersonateUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_IMPERSONATE_USERNAME, config.getImpersonateUsername()));
String configuredImpersonateGroups = Utils.getSystemPropertyOrEnvVar(KUBERNETES_IMPERSONATE_GROUP, config.getImpersonateGroup());
if (configuredImpersonateGroups != null) {
config.setImpersonateGroups(configuredImpersonateGroups.split(","));
}
String configuredWatchReconnectInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WATCH_RECONNECT_INTERVAL_SYSTEM_PROPERTY);
if (configuredWatchReconnectInterval != null) {
config.setWatchReconnectInterval(Integer.parseInt(configuredWatchReconnectInterval));
}
String configuredWatchReconnectLimit = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WATCH_RECONNECT_LIMIT_SYSTEM_PROPERTY);
if (configuredWatchReconnectLimit != null) {
config.setWatchReconnectLimit(Integer.parseInt(configuredWatchReconnectLimit));
}
String configuredRollingTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_ROLLING_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(DEFAULT_ROLLING_TIMEOUT));
if (configuredRollingTimeout != null) {
config.setRollingTimeout(Long.parseLong(configuredRollingTimeout));
}
String configuredScaleTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SCALE_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(DEFAULT_SCALE_TIMEOUT));
if (configuredScaleTimeout != null) {
config.setScaleTimeout(Long.parseLong(configuredScaleTimeout));
}
String configuredLoggingInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_LOGGING_INTERVAL_SYSTEM_PROPERTY, String.valueOf(DEFAULT_LOGGING_INTERVAL));
if (configuredLoggingInterval != null) {
config.setLoggingInterval(Integer.parseInt(configuredLoggingInterval));
}
config.setConnectionTimeout(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CONNECTION_TIMEOUT_SYSTEM_PROPERTY, config.getConnectionTimeout()));
config.setRequestTimeout(Utils.getSystemPropertyOrEnvVar(KUBERNETES_REQUEST_TIMEOUT_SYSTEM_PROPERTY, config.getRequestTimeout()));
String configuredWebsocketTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WEBSOCKET_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(config.getWebsocketTimeout()));
if (configuredWebsocketTimeout != null) {
config.setWebsocketTimeout(Long.parseLong(configuredWebsocketTimeout));
}
String configuredWebsocketPingInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WEBSOCKET_PING_INTERVAL_SYSTEM_PROPERTY, String.valueOf(config.getWebsocketPingInterval()));
if (configuredWebsocketPingInterval != null) {
config.setWebsocketPingInterval(Long.parseLong(configuredWebsocketPingInterval));
}
    String configuredMaxConcurrentRequestsPerHost = Utils.getSystemPropertyOrEnvVar(KUBERNETES_MAX_CONCURRENT_REQUESTS_PER_HOST, String.valueOf(config.getMaxConcurrentRequestsPerHost()));
    if (configuredMaxConcurrentRequestsPerHost != null) {
      config.setMaxConcurrentRequestsPerHost(Integer.parseInt(configuredMaxConcurrentRequestsPerHost));
}
config.setHttpProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_ALL_PROXY, config.getHttpProxy()));
config.setHttpsProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_ALL_PROXY, config.getHttpsProxy()));
config.setHttpsProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_HTTPS_PROXY, config.getHttpsProxy()));
config.setHttpProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_HTTP_PROXY, config.getHttpProxy()));
config.setProxyUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_PROXY_USERNAME, config.getProxyUsername()));
config.setProxyPassword(Utils.getSystemPropertyOrEnvVar(KUBERNETES_PROXY_PASSWORD, config.getProxyPassword()));
String noProxyVar = Utils.getSystemPropertyOrEnvVar(KUBERNETES_NO_PROXY);
if (noProxyVar != null) {
config.setNoProxy(noProxyVar.split(","));
}
String tlsVersionsVar = Utils.getSystemPropertyOrEnvVar(KUBERNETES_TLS_VERSIONS);
if (tlsVersionsVar != null && !tlsVersionsVar.isEmpty()) {
String[] tlsVersionsSplit = tlsVersionsVar.split(",");
TlsVersion[] tlsVersions = new TlsVersion[tlsVersionsSplit.length];
for (int i = 0; i < tlsVersionsSplit.length; i++) {
tlsVersions[i] = TlsVersion.forJavaName(tlsVersionsSplit[i]);
}
config.setTlsVersions(tlsVersions);
}
}
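  // When running inside a pod, Kubernetes injects KUBERNETES_SERVICE_HOST / KUBERNETES_SERVICE_PORT
  // into the environment and mounts the service account token and CA certificate under
  // /var/run/secrets/kubernetes.io/serviceaccount/, which is exactly what this method probes for.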
private static boolean tryServiceAccount(Config config) {
LOGGER.debug("Trying to configure client from service account...");
String masterHost = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SERVICE_HOST_PROPERTY, (String) null);
String masterPort = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SERVICE_PORT_PROPERTY, (String) null);
if (masterHost != null && masterPort != null) {
String hostPort = joinHostPort(masterHost, masterPort);
LOGGER.debug("Found service account host and port: " + hostPort);
config.setMasterUrl("https://" + hostPort);
}
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_TRYSERVICEACCOUNT_SYSTEM_PROPERTY, true)) {
boolean serviceAccountCaCertExists = Files.isRegularFile(new File(KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH).toPath());
if (serviceAccountCaCertExists) {
LOGGER.debug("Found service account ca cert at: ["+KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH+"].");
config.setCaCertFile(KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH);
} else {
LOGGER.debug("Did not find service account ca cert at: ["+KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH+"].");
}
try {
String serviceTokenCandidate = new String(Files.readAllBytes(new File(KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH).toPath()));
if (serviceTokenCandidate != null) {
LOGGER.debug("Found service account token at: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"].");
config.setOauthToken(serviceTokenCandidate);
String txt = "Configured service account doesn't have access. Service account may have been revoked.";
config.getErrorMessages().put(401, "Unauthorized! " + txt);
          config.getErrorMessages().put(403, "Forbidden! " + txt);
return true;
} else {
LOGGER.debug("Did not find service account token at: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"].");
}
} catch (IOException e) {
// No service account token available...
LOGGER.warn("Error reading service account token from: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"]. Ignoring.");
}
}
return false;
}
private static String joinHostPort(String host, String port) {
if (host.indexOf(':') >= 0) {
// Host is an IPv6
return "[" + host + "]:" + port;
}
return host + ":" + port;
}
private static String absolutify(File relativeTo, String filename) {
if (filename == null) {
return null;
}
File file = new File(filename);
if (file.isAbsolute()) {
return file.getAbsolutePath();
}
return new File(relativeTo.getParentFile(), filename).getAbsolutePath();
}
public static Config fromKubeconfig(String kubeconfigContents) {
return fromKubeconfig(null, kubeconfigContents, null);
}
// Note: kubeconfigPath is optional (see note on loadFromKubeConfig)
public static Config fromKubeconfig(String context, String kubeconfigContents, String kubeconfigPath) {
// we allow passing context along here, since downstream accepts it
Config config = new Config();
Config.loadFromKubeconfig(config, context, kubeconfigContents, kubeconfigPath);
return config;
}
private static boolean tryKubeConfig(Config config, String context) {
LOGGER.debug("Trying to configure client from Kubernetes config...");
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_TRYKUBECONFIG_SYSTEM_PROPERTY, true)) {
File kubeConfigFile = new File(
Utils.getSystemPropertyOrEnvVar(KUBERNETES_KUBECONFIG_FILE, new File(getHomeDir(), ".kube" + File.separator + "config").toString()));
boolean kubeConfigFileExists = Files.isRegularFile(kubeConfigFile.toPath());
if (kubeConfigFileExists) {
        LOGGER.debug("Found Kubernetes config at: ["+kubeConfigFile.getPath()+"].");
String kubeconfigContents;
try {
kubeconfigContents = new String(Files.readAllBytes(kubeConfigFile.toPath()), StandardCharsets.UTF_8);
} catch(IOException e) {
LOGGER.error("Could not load Kubernetes config file from {}", kubeConfigFile.getPath(), e);
return false;
}
Config.loadFromKubeconfig(config, context, kubeconfigContents, kubeConfigFile.getPath());
return true;
} else {
LOGGER.debug("Did not find Kubernetes config at: ["+kubeConfigFile.getPath()+"]. Ignoring.");
}
}
return false;
}
// Note: kubeconfigPath is optional
// It is only used to rewrite relative tls asset paths inside kubeconfig when a file is passed, and in the case that
// the kubeconfig references some assets via relative paths.
private static boolean loadFromKubeconfig(Config config, String context, String kubeconfigContents, String kubeconfigPath) {
try {
io.fabric8.kubernetes.api.model.Config kubeConfig = KubeConfigUtils.parseConfigFromString(kubeconfigContents);
if (context != null) {
kubeConfig.setCurrentContext(context);
}
Context currentContext = KubeConfigUtils.getCurrentContext(kubeConfig);
Cluster currentCluster = KubeConfigUtils.getCluster(kubeConfig, currentContext);
if (currentCluster != null) {
config.setMasterUrl(currentCluster.getServer());
config.setNamespace(currentContext.getNamespace());
config.setTrustCerts(currentCluster.getInsecureSkipTlsVerify() != null && currentCluster.getInsecureSkipTlsVerify());
config.setDisableHostnameVerification(currentCluster.getInsecureSkipTlsVerify() != null && currentCluster.getInsecureSkipTlsVerify());
config.setCaCertData(currentCluster.getCertificateAuthorityData());
AuthInfo currentAuthInfo = KubeConfigUtils.getUserAuthInfo(kubeConfig, currentContext);
if (currentAuthInfo != null) {
// rewrite tls asset paths if needed
String caCertFile = currentCluster.getCertificateAuthority();
String clientCertFile = currentAuthInfo.getClientCertificate();
String clientKeyFile = currentAuthInfo.getClientKey();
if (kubeconfigPath != null && !kubeconfigPath.isEmpty()) {
caCertFile = absolutify(new File(kubeconfigPath), currentCluster.getCertificateAuthority());
clientCertFile = absolutify(new File(kubeconfigPath), currentAuthInfo.getClientCertificate());
clientKeyFile = absolutify(new File(kubeconfigPath), currentAuthInfo.getClientKey());
}
config.setCaCertFile(caCertFile);
config.setClientCertFile(clientCertFile);
config.setClientCertData(currentAuthInfo.getClientCertificateData());
config.setClientKeyFile(clientKeyFile);
config.setClientKeyData(currentAuthInfo.getClientKeyData());
config.setOauthToken(currentAuthInfo.getToken());
config.setUsername(currentAuthInfo.getUsername());
config.setPassword(currentAuthInfo.getPassword());
if (Utils.isNullOrEmpty(config.getOauthToken()) && currentAuthInfo.getAuthProvider() != null && !Utils.isNullOrEmpty(currentAuthInfo.getAuthProvider().getConfig().get(ACCESS_TOKEN))) {
config.setOauthToken(currentAuthInfo.getAuthProvider().getConfig().get(ACCESS_TOKEN));
} else { // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
Object _exec = currentAuthInfo.getAdditionalProperties().get("exec");
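            // Sketch of the kubeconfig "exec" stanza this block understands (the command name is
            // only an example, not something this client requires):
            //   exec:
            //     apiVersion: client.authentication.k8s.io/v1alpha1
            //     command: aws-iam-authenticator
            //     args: ["token", "-i", "my-cluster"]
            //     env:
            //       - name: AWS_PROFILE
            //         value: build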
if (_exec instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> exec = (Map) _exec;
String apiVersion = (String) exec.get("apiVersion");
if ("client.authentication.k8s.io/v1alpha1".equals(apiVersion)) {
List<String> argv = new ArrayList<String>();
String command = (String) exec.get("command");
if (command.contains("/") && !command.startsWith("/") && kubeconfigPath != null && !kubeconfigPath.isEmpty()) {
// Appears to be a relative path; normalize. Spec is vague about how to detect this situation.
command = Paths.get(kubeconfigPath).resolveSibling(command).normalize().toString();
}
argv.add(command);
@SuppressWarnings("unchecked")
List<String> args = (List) exec.get("args");
if (args != null) {
argv.addAll(args);
}
ProcessBuilder pb = new ProcessBuilder(argv);
@SuppressWarnings("unchecked")
List<Map<String, String>> env = (List<Map<String, String>>) exec.get("env");
if (env != null) {
Map<String, String> environment = pb.environment();
env.forEach(pair -> environment.put(pair.get("name"), pair.get("value")));
}
// TODO check behavior of tty & stdin
Process p = pb.start();
if (p.waitFor() != 0) {
LOGGER.warn(IOHelpers.readFully(p.getErrorStream()));
}
ExecCredential ec = Serialization.unmarshal(p.getInputStream(), ExecCredential.class);
if (!apiVersion.equals(ec.apiVersion)) {
LOGGER.warn("Wrong apiVersion {} vs. {}", ec.apiVersion, apiVersion);
}
if (ec.status != null && ec.status.token != null) {
config.setOauthToken(ec.status.token);
} else {
LOGGER.warn("No token returned");
}
} else { // TODO v1beta1?
LOGGER.warn("Unsupported apiVersion: {}", apiVersion);
}
}
}
config.getErrorMessages().put(401, "Unauthorized! Token may have expired! Please log-in again.");
config.getErrorMessages().put(403, "Forbidden! User "+currentContext.getUser()+ " doesn't have permission.");
}
return true;
}
} catch (Exception e) {
LOGGER.error("Failed to parse the kubeconfig.", e);
}
return false;
}
@JsonIgnoreProperties(ignoreUnknown = true)
private static final class ExecCredential {
public String kind;
public String apiVersion;
public ExecCredentialSpec spec;
public ExecCredentialStatus status;
}
@JsonIgnoreProperties(ignoreUnknown = true)
private static final class ExecCredentialSpec {}
@JsonIgnoreProperties(ignoreUnknown = true)
private static final class ExecCredentialStatus {
public String token;
// TODO clientCertificateData, clientKeyData, expirationTimestamp
}
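  // For reference, the credential plugin is expected to print JSON shaped like the classes above,
  // roughly: {"apiVersion":"client.authentication.k8s.io/v1alpha1","kind":"ExecCredential",
  // "status":{"token":"<bearer token>"}} - only status.token is consumed here.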
private static boolean tryNamespaceFromPath(Config config) {
LOGGER.debug("Trying to configure client namespace from Kubernetes service account namespace path...");
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRYNAMESPACE_PATH_SYSTEM_PROPERTY, true)) {
String serviceAccountNamespace = Utils.getSystemPropertyOrEnvVar(KUBERNETES_NAMESPACE_FILE, KUBERNETES_NAMESPACE_PATH);
boolean serviceAccountNamespaceExists = Files.isRegularFile(new File(serviceAccountNamespace).toPath());
if (serviceAccountNamespaceExists) {
LOGGER.debug("Found service account namespace at: [" + serviceAccountNamespace + "].");
try {
String namespace = new String(Files.readAllBytes(new File(serviceAccountNamespace).toPath()));
config.setNamespace(namespace.replace(System.lineSeparator(), ""));
return true;
} catch (IOException e) {
LOGGER.error("Error reading service account namespace from: [" + serviceAccountNamespace + "].", e);
}
} else {
LOGGER.debug("Did not find service account namespace at: [" + serviceAccountNamespace + "]. Ignoring.");
}
}
return false;
}
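  // Home directory lookup order: on Windows, HOMEDRIVE+HOMEPATH first, then USERPROFILE;
  // on all platforms HOME is tried next, and the "user.home" system property is the final fallback.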
private static String getHomeDir() {
String osName = System.getProperty("os.name").toLowerCase();
if (osName.startsWith("win")) {
String homeDrive = System.getenv("HOMEDRIVE");
String homePath = System.getenv("HOMEPATH");
if (homeDrive != null && !homeDrive.isEmpty() && homePath != null && !homePath.isEmpty()) {
String homeDir = homeDrive + homePath;
File f = new File(homeDir);
if (f.exists() && f.isDirectory()) {
return homeDir;
}
}
String userProfile = System.getenv("USERPROFILE");
if (userProfile != null && !userProfile.isEmpty()) {
File f = new File(userProfile);
if (f.exists() && f.isDirectory()) {
return userProfile;
}
}
}
String home = System.getenv("HOME");
if (home != null && !home.isEmpty()) {
File f = new File(home);
if (f.exists() && f.isDirectory()) {
return home;
}
}
    // Fall back to user.home; we should never really get here.
return System.getProperty("user.home", ".");
}
@JsonProperty("oauthToken")
public String getOauthToken() {
return getRequestConfig().getOauthToken();
}
public void setOauthToken(String oauthToken) {
this.requestConfig.setOauthToken(oauthToken);
}
@JsonProperty("password")
public String getPassword() {
return getRequestConfig().getPassword();
}
public void setPassword(String password) {
this.requestConfig.setPassword(password);
}
@JsonProperty("username")
public String getUsername() {
return getRequestConfig().getUsername();
}
public void setUsername(String username) {
this.requestConfig.setUsername(username);
}
@JsonProperty("impersonateUsername")
public String getImpersonateUsername() {
return getRequestConfig().getImpersonateUsername();
}
public void setImpersonateUsername(String impersonateUsername) {
this.requestConfig.setImpersonateUsername(impersonateUsername);
}
@JsonProperty("impersonateGroups")
public String[] getImpersonateGroups() {
return getRequestConfig().getImpersonateGroups();
}
public void setImpersonateGroups(String... impersonateGroup) {
this.requestConfig.setImpersonateGroups(impersonateGroup);
}
/**
* @deprecated Use {@link #getImpersonateGroups()} instead
*/
@Deprecated
@JsonProperty("impersonateGroup")
public String getImpersonateGroup() {
return getRequestConfig().getImpersonateGroup();
}
/**
* @deprecated Use {@link #setImpersonateGroups(String...)} instead
*/
@Deprecated
public void setImpersonateGroup(String impersonateGroup) {
this.requestConfig.setImpersonateGroups(impersonateGroup);
}
@JsonProperty("impersonateExtras")
public Map<String, List<String>> getImpersonateExtras() {
return getRequestConfig().getImpersonateExtras();
}
public void setImpersonateExtras(Map<String, List<String>> impersonateExtras) {
this.requestConfig.setImpersonateExtras(impersonateExtras);
}
@JsonProperty("clientKeyPassphrase")
public String getClientKeyPassphrase() {
return clientKeyPassphrase;
}
public void setClientKeyPassphrase(String clientKeyPassphrase) {
this.clientKeyPassphrase = clientKeyPassphrase;
}
@JsonProperty("clientKeyAlgo")
public String getClientKeyAlgo() {
return clientKeyAlgo;
}
public void setClientKeyAlgo(String clientKeyAlgo) {
this.clientKeyAlgo = clientKeyAlgo;
}
@JsonProperty("clientKeyData")
public String getClientKeyData() {
return clientKeyData;
}
public void setClientKeyData(String clientKeyData) {
this.clientKeyData = clientKeyData;
}
@JsonProperty("clientKeyFile")
public String getClientKeyFile() {
return clientKeyFile;
}
public void setClientKeyFile(String clientKeyFile) {
this.clientKeyFile = clientKeyFile;
}
@JsonProperty("clientCertData")
public String getClientCertData() {
return clientCertData;
}
public void setClientCertData(String clientCertData) {
this.clientCertData = clientCertData;
}
@JsonProperty("clientCertFile")
public String getClientCertFile() {
return clientCertFile;
}
public void setClientCertFile(String clientCertFile) {
this.clientCertFile = clientCertFile;
}
@JsonProperty("caCertData")
public String getCaCertData() {
return caCertData;
}
public void setCaCertData(String caCertData) {
this.caCertData = caCertData;
}
@JsonProperty("caCertFile")
public String getCaCertFile() {
return caCertFile;
}
public void setCaCertFile(String caCertFile) {
this.caCertFile = caCertFile;
}
@JsonProperty("apiVersion")
public String getApiVersion() {
return apiVersion;
}
public void setApiVersion(String apiVersion) {
this.apiVersion = apiVersion;
}
@JsonProperty("masterUrl")
public String getMasterUrl() {
return masterUrl;
}
public void setMasterUrl(String masterUrl) {
this.masterUrl = masterUrl;
}
@JsonProperty("trustCerts")
public boolean isTrustCerts() {
return trustCerts;
}
public void setTrustCerts(boolean trustCerts) {
this.trustCerts = trustCerts;
}
@JsonProperty("disableHostnameVerification")
public boolean isDisableHostnameVerification() {
return disableHostnameVerification;
}
public void setDisableHostnameVerification(boolean disableHostnameVerification) {
this.disableHostnameVerification = disableHostnameVerification;
}
@JsonProperty("watchReconnectInterval")
public int getWatchReconnectInterval() {
    return getRequestConfig().getWatchReconnectInterval();
}
public void setWatchReconnectInterval(int watchReconnectInterval) {
this.requestConfig.setWatchReconnectInterval(watchReconnectInterval);
}
@JsonProperty("watchReconnectLimit")
public int getWatchReconnectLimit() {
return getRequestConfig().getWatchReconnectLimit();
}
public void setWatchReconnectLimit(int watchReconnectLimit) {
this.requestConfig.setWatchReconnectLimit(watchReconnectLimit);
}
@JsonProperty("errorMessages")
public Map<Integer, String> getErrorMessages() {
return errorMessages;
}
public void setErrorMessages(Map<Integer, String> errorMessages) {
this.errorMessages = errorMessages;
}
public static ConfigBuilder builder() {
return new ConfigBuilder();
}
@JsonProperty("connectionTimeout")
public int getConnectionTimeout() {
return getRequestConfig().getConnectionTimeout();
}
public void setConnectionTimeout(int connectionTimeout) {
this.requestConfig.setConnectionTimeout(connectionTimeout);
}
@JsonProperty("requestTimeout")
public int getRequestTimeout() {
return getRequestConfig().getRequestTimeout();
}
public void setRequestTimeout(int requestTimeout) {
this.requestConfig.setRequestTimeout(requestTimeout);
}
@JsonProperty("rollingTimeout")
public long getRollingTimeout() {
return getRequestConfig().getRollingTimeout();
}
public void setRollingTimeout(long rollingTimeout) {
this.requestConfig.setRollingTimeout(rollingTimeout);
}
@JsonProperty("scaleTimeout")
public long getScaleTimeout() {
return getRequestConfig().getScaleTimeout();
}
public void setScaleTimeout(long scaleTimeout) {
this.requestConfig.setScaleTimeout(scaleTimeout);
}
@JsonProperty("loggingInterval")
public int getLoggingInterval() {
return getRequestConfig().getLoggingInterval();
}
public void setLoggingInterval(int loggingInterval) {
this.requestConfig.setLoggingInterval(loggingInterval);
}
public void setHttpProxy(String httpProxy) {
this.httpProxy= httpProxy;
}
@JsonProperty("httpProxy")
public String getHttpProxy() {
return httpProxy;
}
public void setHttpsProxy(String httpsProxy) {
this.httpsProxy= httpsProxy;
}
@JsonProperty("httpsProxy")
public String getHttpsProxy() {
return httpsProxy;
}
public void setNoProxy(String[] noProxy) {
this.noProxy = noProxy;
}
@JsonProperty("noProxy")
public String[] getNoProxy() {
return noProxy;
}
@JsonProperty("namespace")
public String getNamespace() {
return namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
@JsonProperty("userAgent")
public String getUserAgent() {
return userAgent;
}
public void setUserAgent(String userAgent) {
this.userAgent = userAgent;
}
@JsonProperty("tlsVersions")
public TlsVersion[] getTlsVersions() {
return tlsVersions;
}
public void setTlsVersions(TlsVersion[] tlsVersions) {
this.tlsVersions = tlsVersions;
}
@JsonProperty("websocketTimeout")
public long getWebsocketTimeout() {
return getRequestConfig().getWebsocketTimeout();
}
public void setWebsocketTimeout(long websocketTimeout) {
this.requestConfig.setWebsocketTimeout(websocketTimeout);
}
@JsonProperty("websocketPingInterval")
public long getWebsocketPingInterval() {
return getRequestConfig().getWebsocketPingInterval();
}
public void setWebsocketPingInterval(long websocketPingInterval) {
this.requestConfig.setWebsocketPingInterval(websocketPingInterval);
}
public int getMaxConcurrentRequests() {
return getRequestConfig().getMaxConcurrentRequests();
}
public void setMaxConcurrentRequests(int maxConcurrentRequests) {
this.requestConfig.setMaxConcurrentRequests(maxConcurrentRequests);
}
public int getMaxConcurrentRequestsPerHost() {
return getRequestConfig().getMaxConcurrentRequestsPerHost();
}
public void setMaxConcurrentRequestsPerHost(int maxConcurrentRequestsPerHost) {
this.requestConfig.setMaxConcurrentRequestsPerHost(maxConcurrentRequestsPerHost);
}
@JsonProperty("proxyUsername")
public String getProxyUsername() {
return proxyUsername;
}
public void setProxyUsername(String proxyUsername) {
this.proxyUsername = proxyUsername;
}
@JsonProperty("proxyPassword")
public String getProxyPassword() {
return proxyPassword;
}
public void setProxyPassword(String proxyPassword) {
this.proxyPassword = proxyPassword;
}
public RequestConfig getRequestConfig() {
RequestConfig rc = RequestConfigHolder.get();
return rc != null ? rc : this.requestConfig;
}
public void setTrustStorePassphrase(String trustStorePassphrase) {
this.trustStorePassphrase = trustStorePassphrase;
}
@JsonProperty("trustStorePassphrase")
public String getTrustStorePassphrase() {
return trustStorePassphrase;
}
public void setKeyStorePassphrase(String keyStorePassphrase) {
this.keyStorePassphrase = keyStorePassphrase;
}
@JsonProperty("keyStorePassphrase")
public String getKeyStorePassphrase() {
return keyStorePassphrase;
}
public void setTrustStoreFile(String trustStoreFile) {
this.trustStoreFile = trustStoreFile;
}
@JsonProperty("trustStoreFile")
public String getTrustStoreFile() {
return trustStoreFile;
}
public void setKeyStoreFile(String keyStoreFile) {
this.keyStoreFile = keyStoreFile;
}
@JsonProperty("keyStoreFile")
public String getKeyStoreFile() {
return keyStoreFile;
}
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
]
| [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | java | 4 | 0 | |
brewery_test.go | package brewerydb
import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"testing"
)
func TestBreweryGet(t *testing.T) {
setup()
defer teardown()
data := loadTestData("brewery.get.json", t)
defer data.Close()
const id = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
checkURLSuffix(t, r, id)
io.Copy(w, data)
})
b, err := client.Brewery.Get(id)
if err != nil {
t.Fatal(err)
}
if b.ID != id {
t.Fatalf("Brewery ID = %v, want %v", b.ID, id)
}
testBadURL(t, func() error {
_, err := client.Brewery.Get(id)
return err
})
}
func TestBreweryList(t *testing.T) {
setup()
defer teardown()
data := loadTestData("brewery.list.json", t)
defer data.Close()
const established = "1988"
mux.HandleFunc("/breweries", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
if v := r.FormValue("established"); v != established {
t.Fatalf("Request.FormValue established = %v, wanted %v", v, established)
}
// TODO: check more request query values
io.Copy(w, data)
})
bl, err := client.Brewery.List(&BreweryListRequest{Established: established})
if err != nil {
t.Fatal(err)
}
if len(bl.Breweries) <= 0 {
t.Fatal("Expected >0 breweries")
}
for _, b := range bl.Breweries {
if l := 6; l != len(b.ID) {
t.Fatalf("Brewery ID len = %d, wanted %d", len(b.ID), l)
}
}
testBadURL(t, func() error {
_, err := client.Brewery.List(&BreweryListRequest{Established: established})
return err
})
}
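// makeTestBrewery builds the fixture shared by the Add and Update tests below; the ID and Status
// fields are expected to be left out of the encoded form (see the checkPostFormDNE assertions),
// since they are tagged "-"/"omitempty" on the Brewery type.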
func makeTestBrewery() *Brewery {
return &Brewery{
ID: "jmGoBA",
Name: "Flying Dog Brewery",
Description: "Good people drink good beer.",
MailingListURL: "[email protected]",
Image: "https://s3.amazonaws.com/brewerydbapi/brewery/jmGoBA/upload_0z9L4W-large.png",
Established: "1983",
IsOrganic: true,
Website: "http://www.flyingdogales.com",
Status: "verified",
}
}
func TestBreweryAdd(t *testing.T) {
setup()
defer teardown()
brewery := makeTestBrewery()
const newID = "abcdef"
mux.HandleFunc("/breweries", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "POST")
if err := r.ParseForm(); err != nil {
http.Error(w, "failed to parse form", http.StatusBadRequest)
}
checkPostFormValue(t, r, "name", brewery.Name)
checkPostFormValue(t, r, "description", brewery.Description)
checkPostFormValue(t, r, "mailingListUrl", brewery.MailingListURL)
checkPostFormValue(t, r, "image", brewery.Image)
checkPostFormValue(t, r, "established", brewery.Established)
checkPostFormValue(t, r, "isOrganic", "Y")
checkPostFormValue(t, r, "website", brewery.Website)
// Check that fields tagged with "-" or "omitempty" are NOT encoded
checkPostFormDNE(t, r, "id", "ID", "status", "Status")
fmt.Fprintf(w, `{"status":"...", "data":{"id":"%s"}, "message":"..."}`, newID)
})
id, err := client.Brewery.Add(brewery)
if err != nil {
t.Fatal(err)
}
if id != newID {
t.Fatalf("new Brewery ID = %v, want %v", id, newID)
}
_, err = client.Brewery.Add(nil)
if err == nil {
t.Fatal("expected error regarding nil parameter")
}
testBadURL(t, func() error {
_, err = client.Brewery.Add(brewery)
return err
})
}
func TestBreweryUpdate(t *testing.T) {
setup()
defer teardown()
brewery := makeTestBrewery()
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "PUT")
checkURLSuffix(t, r, brewery.ID)
if err := r.ParseForm(); err != nil {
http.Error(w, "failed to parse form", http.StatusBadRequest)
}
checkPostFormValue(t, r, "name", brewery.Name)
checkPostFormValue(t, r, "description", brewery.Description)
checkPostFormValue(t, r, "mailingListUrl", brewery.MailingListURL)
checkPostFormValue(t, r, "image", brewery.Image)
checkPostFormValue(t, r, "established", brewery.Established)
checkPostFormValue(t, r, "isOrganic", "Y")
checkPostFormValue(t, r, "website", brewery.Website)
// Check that fields tagged with "-" or "omitempty" are NOT encoded
checkPostFormDNE(t, r, "id", "ID", "status", "Status")
})
if err := client.Brewery.Update(brewery.ID, brewery); err != nil {
t.Fatal(err)
}
if client.Brewery.Update(brewery.ID, nil) == nil {
t.Fatal("expected error regarding nil parameter")
}
testBadURL(t, func() error {
return client.Brewery.Update(brewery.ID, brewery)
})
}
func TestBreweryDelete(t *testing.T) {
setup()
defer teardown()
const id = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "DELETE")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" {
t.Fatal("bad URL, expected \"/brewery/:breweryId\"")
}
if split[2] != id {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
})
if err := client.Brewery.Delete(id); err != nil {
t.Fatal(err)
}
if err := client.Brewery.Delete("******"); err == nil {
t.Fatal("expected HTTP 404")
}
testBadURL(t, func() error {
return client.Brewery.Delete(id)
})
}
func TestBreweryGetSocialAccount(t *testing.T) {
setup()
defer teardown()
data := loadTestData("brewery.get.socialaccount.json", t)
defer data.Close()
const (
breweryID = "jmGoBA"
socialAccountID = 16
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "socialaccount" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/socialaccount/:socialaccountId\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if split[4] != strconv.Itoa(socialAccountID) {
http.Error(w, "invalid SocialAccount ID", http.StatusNotFound)
}
io.Copy(w, data)
})
a, err := client.Brewery.GetSocialAccount(breweryID, socialAccountID)
if err != nil {
t.Fatal(err)
}
if a.ID != socialAccountID {
t.Fatalf("SocialAccount ID = %v, want %v", a.ID, socialAccountID)
}
testBadURL(t, func() error {
_, err := client.Brewery.GetSocialAccount(breweryID, socialAccountID)
return err
})
}
func TestBreweryListSocialAccount(t *testing.T) {
setup()
defer teardown()
data := loadTestData("brewery.list.socialaccounts.json", t)
defer data.Close()
const breweryID = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "socialaccounts" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/socialaccounts\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
io.Copy(w, data)
})
al, err := client.Brewery.ListSocialAccounts(breweryID)
if err != nil {
t.Fatal(err)
}
if len(al) <= 0 {
t.Fatal("Expected >0 SocialAccounts")
}
for _, a := range al {
if a.ID <= 0 {
t.Fatal("Expected ID >0")
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListSocialAccounts(breweryID)
return err
})
}
func TestBreweryAddSocialAccount(t *testing.T) {
setup()
defer teardown()
account := &SocialAccount{
ID: 3,
SocialMediaID: 1,
SocialSite: SocialSite{
ID: 1,
Name: "Facebook Fan Page",
Website: "http://www.facebook.com",
},
Handle: "flying_dog",
}
const id = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "POST")
split := strings.Split(r.URL.Path, "/")
if split[3] != "socialaccounts" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/socialaccounts\"")
}
if split[2] != id {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
checkPostFormValue(t, r, "socialmediaId", strconv.Itoa(account.SocialMediaID))
checkPostFormValue(t, r, "handle", account.Handle)
checkPostFormDNE(t, r, "id", "ID", "socialMedia", "SocialSite")
})
if err := client.Brewery.AddSocialAccount(id, account); err != nil {
t.Fatal(err)
}
if client.Brewery.AddSocialAccount("******", account) == nil {
t.Fatal("expected HTTP error")
}
if client.Brewery.AddSocialAccount(id, nil) == nil {
t.Fatal("expected error regarding nil parameter")
}
testBadURL(t, func() error {
return client.Brewery.AddSocialAccount(id, account)
})
}
func TestBreweryUpdateSocialAccount(t *testing.T) {
setup()
defer teardown()
account := &SocialAccount{
ID: 3,
SocialMediaID: 1,
SocialSite: SocialSite{
ID: 1,
Name: "Facebook Fan Page",
Website: "http://www.facebook.com",
},
Handle: "flying_dog",
}
const id = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "PUT")
if err := r.ParseForm(); err != nil {
http.Error(w, "failed to parse form", http.StatusBadRequest)
}
split := strings.Split(r.URL.Path, "/")
if split[3] != "socialaccount" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/socialaccount/:socialaccountId\"")
}
if split[2] != id {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if split[4] != strconv.Itoa(account.ID) {
http.Error(w, "invalid SocialAccount ID", http.StatusNotFound)
}
checkPostFormValue(t, r, "socialmediaId", strconv.Itoa(account.SocialMediaID))
checkPostFormValue(t, r, "handle", account.Handle)
checkPostFormDNE(t, r, "id", "socialMedia", "SocialSite")
})
if err := client.Brewery.UpdateSocialAccount(id, account); err != nil {
t.Fatal(err)
}
if client.Brewery.UpdateSocialAccount("******", account) == nil {
t.Fatal("expected HTTP error")
}
if client.Brewery.UpdateSocialAccount(id, nil) == nil {
t.Fatal("expected error regarding nil parameter")
}
testBadURL(t, func() error {
return client.Brewery.UpdateSocialAccount(id, account)
})
}
func TestAddAlternateName(t *testing.T) {
setup()
defer teardown()
const (
breweryID = "jmGoBA"
altName = "Flying Dog"
newID = 3
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "POST")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "alternatenames" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/alternatenames\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
checkPostFormValue(t, r, "name", altName)
fmt.Fprintf(w, `{"status":"...", "data":{"id":%d}, "message":"..."}`, newID)
})
id, err := client.Brewery.AddAlternateName(breweryID, altName)
if err != nil {
t.Fatal(err)
}
if id != newID {
t.Fatalf("alternate name ID = %v, want %v", id, newID)
}
_, err = client.Brewery.AddAlternateName("******", altName)
if err == nil {
t.Fatal("expected HTTP 404")
}
testBadURL(t, func() error {
_, err := client.Brewery.AddAlternateName(breweryID, altName)
return err
})
}
func TestBreweryDeleteAlternateName(t *testing.T) {
setup()
defer teardown()
const (
breweryID = "jmGoBA"
altID = 2
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "DELETE")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "alternatename" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/alternatename/:alternatenameId\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if split[4] != strconv.Itoa(altID) {
http.Error(w, "invalid alternatename ID", http.StatusNotFound)
}
})
if err := client.Brewery.DeleteAlternateName(breweryID, altID); err != nil {
t.Fatal(err)
}
if err := client.Brewery.DeleteAlternateName("******", altID); err == nil {
t.Fatal("expected HTTP 404")
}
if err := client.Brewery.DeleteAlternateName(breweryID, -1); err == nil {
t.Fatal("expected HTTP 404")
}
testBadURL(t, func() error {
return client.Brewery.DeleteAlternateName(breweryID, altID)
})
}
func TestBreweryAddGuild(t *testing.T) {
setup()
defer teardown()
const (
breweryID = "jmGoBA"
guildID = "k2jMtH"
)
discount := "10%"
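// firstTest gates the discount assertion: the first AddGuild call below sends a discount, while a later call passes nil and must omit the form value.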
firstTest := true
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "POST")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "guilds" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/guilds\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
checkPostFormValue(t, r, "guildId", guildID)
if firstTest {
checkPostFormValue(t, r, "discount", discount)
}
})
if err := client.Brewery.AddGuild(breweryID, guildID, &discount); err != nil {
t.Fatal(err)
}
firstTest = false
if err := client.Brewery.AddGuild("******", guildID, nil); err == nil {
t.Fatal("expected HTTP 404")
}
if err := client.Brewery.AddGuild(breweryID, guildID, nil); err != nil {
t.Fatal(err)
}
testBadURL(t, func() error {
return client.Brewery.AddGuild(breweryID, guildID, &discount)
})
}
func TestBreweryDeleteGuild(t *testing.T) {
setup()
defer teardown()
const (
breweryID = "jmGoBA"
guildID = "k2jMtH"
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "DELETE")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "guild" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/guild/:guildId\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if split[4] != guildID {
http.Error(w, "invalid Guild ID", http.StatusNotFound)
}
})
if err := client.Brewery.DeleteGuild(breweryID, guildID); err != nil {
t.Fatal(err)
}
if err := client.Brewery.DeleteGuild("******", guildID); err == nil {
t.Fatal("expected HTTP 404")
}
if err := client.Brewery.DeleteGuild(breweryID, "~~~~~~"); err == nil {
t.Fatal("expected HTTP 404")
}
testBadURL(t, func() error {
return client.Brewery.DeleteGuild(breweryID, guildID)
})
}
func TestBreweryAddLocation(t *testing.T) {
setup()
defer teardown()
location := makeTestLocation()
const (
breweryID = "jmGoBA"
newID = "abcdef"
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "POST")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "locations" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/locations\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if err := r.ParseForm(); err != nil {
http.Error(w, "failed to parse form", http.StatusBadRequest)
}
checkPostFormValue(t, r, "name", location.Name)
checkPostFormValue(t, r, "streetAddress", location.StreetAddress)
checkPostFormValue(t, r, "locality", location.Locality)
checkPostFormValue(t, r, "region", location.Region)
checkPostFormValue(t, r, "postalCode", location.PostalCode)
checkPostFormValue(t, r, "phone", location.Phone)
checkPostFormValue(t, r, "website", location.Website)
checkPostFormValue(t, r, "hoursOfOperationExplicit", location.HoursOfOperationExplicit[0])
checkPostFormValue(t, r, "latitude", fmt.Sprintf("%f", location.Latitude))
checkPostFormValue(t, r, "longitude", fmt.Sprintf("%f", location.Longitude))
checkPostFormValue(t, r, "isPrimary", "Y")
checkPostFormValue(t, r, "openToPublic", "Y")
checkPostFormValue(t, r, "locationType", string(location.LocationType))
checkPostFormValue(t, r, "countryIsoCode", location.CountryISOCode)
// Check that fields tagged with "-" or "omitempty" are NOT encoded
checkPostFormDNE(t, r, "id", "ID", "extendedAddress",
"ExtendedAddress", "hoursOfOperation", "hoursOfOperationNotes", "tourInfo",
"LocationTypeDisplay", "country", "Country", "yearClosed",
"breweryID", "BreweryID", "brewery", "Brewery",
"status", "Status", "inPlanning", "isClosed")
fmt.Fprintf(w, `{"status":"...", "data":{"guid":"%s"}, "message":"..."}`, newID)
})
id, err := client.Brewery.AddLocation(breweryID, location)
if err != nil {
t.Fatal(err)
}
if id != newID {
t.Fatalf("Location ID = %v, want %v", id, newID)
}
_, err = client.Brewery.AddLocation("******", location)
if err == nil {
t.Fatal("expected HTTP 404 error")
}
_, err = client.Brewery.AddLocation(breweryID, nil)
if err == nil {
t.Fatal("expected error regarding nil parameter")
}
testBadURL(t, func() error {
_, err := client.Brewery.AddLocation(breweryID, location)
return err
})
}
func TestBreweryDeleteSocialAccount(t *testing.T) {
setup()
defer teardown()
const (
breweryID = "jmGoBA"
socialID = 2
)
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "DELETE")
split := strings.Split(r.URL.Path, "/")
if split[1] != "brewery" || split[3] != "socialaccount" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/socialaccount/:socialaccountId\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
if split[4] != strconv.Itoa(socialID) {
http.Error(w, "invalid socialaccount ID", http.StatusNotFound)
}
})
if err := client.Brewery.DeleteSocialAccount(breweryID, socialID); err != nil {
t.Fatal(err)
}
if err := client.Brewery.DeleteSocialAccount("******", socialID); err == nil {
t.Fatal("expected HTTP 404")
}
if err := client.Brewery.DeleteSocialAccount(breweryID, -1); err == nil {
t.Fatal("expected HTTP 404")
}
testBadURL(t, func() error {
return client.Brewery.DeleteSocialAccount(breweryID, socialID)
})
}
func TestBreweryListEvents(t *testing.T) {
setup()
defer teardown()
data := loadTestData("event.list.json", t)
defer data.Close()
const breweryID = "jmGoBA"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "events" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/events\"")
}
if split[2] != breweryID {
http.Error(w, "invalid brewery ID", http.StatusNotFound)
}
checkFormValue(t, r, "onlyWinners", "Y")
io.Copy(w, data)
})
el, err := client.Brewery.ListEvents(breweryID, true)
if err != nil {
t.Fatal(err)
}
if len(el) <= 0 {
t.Fatal("Expected >0 Events")
}
for _, e := range el {
if l := 6; l != len(e.ID) {
t.Fatalf("Event ID len = %d, wanted %d", len(e.ID), l)
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListEvents(breweryID, false)
return err
})
}
func TestBreweryListBeers(t *testing.T) {
setup()
defer teardown()
data := loadTestData("beer.list.json", t)
defer data.Close()
const breweryID = "o9TSOv"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "beers" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/beers\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
checkFormValue(t, r, "withBreweries", "Y")
checkFormValue(t, r, "withSocialAccounts", "Y")
checkFormValue(t, r, "withIngredients", "Y")
io.Copy(w, data)
})
req := &BreweryBeersRequest{
WithBreweries: true,
WithSocialAccounts: true,
WithIngredients: true,
}
bl, err := client.Brewery.ListBeers(breweryID, req)
if err != nil {
t.Fatal(err)
}
if len(bl) <= 0 {
t.Fatal("Expected >0 Beers")
}
for _, b := range bl {
if l := 6; l != len(b.ID) {
t.Fatalf("Brewery ID len = %d, wanted %d", len(b.ID), l)
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListBeers(breweryID, req)
return err
})
}
func TestBreweryListGuilds(t *testing.T) {
setup()
defer teardown()
data := loadTestData("guild.list.json", t)
defer data.Close()
const breweryID = "o9TSOv"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "guilds" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/guilds\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
io.Copy(w, data)
})
ll, err := client.Brewery.ListGuilds(breweryID)
if err != nil {
t.Fatal(err)
}
if len(ll) <= 0 {
t.Fatal("Expected >0 Guilds")
}
for _, loc := range ll {
if l := 6; l != len(loc.ID) {
t.Fatalf("Brewery ID len = %d, wanted %d", len(loc.ID), l)
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListGuilds(breweryID)
return err
})
}
func TestBreweryListLocations(t *testing.T) {
setup()
defer teardown()
data := loadTestData("location.list.json", t)
defer data.Close()
const breweryID = "o9TSOv"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "locations" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/locations\"")
}
if split[2] != breweryID {
http.Error(w, "invalid Brewery ID", http.StatusNotFound)
}
io.Copy(w, data)
})
ll, err := client.Brewery.ListLocations(breweryID)
if err != nil {
t.Fatal(err)
}
if len(ll) <= 0 {
t.Fatal("Expected >0 Locations")
}
for _, loc := range ll {
if l := 6; l != len(loc.ID) {
t.Fatalf("Brewery ID len = %d, wanted %d", len(loc.ID), l)
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListLocations(breweryID)
return err
})
}
func TestBreweryListAlternateNames(t *testing.T) {
setup()
defer teardown()
data := loadTestData("brewery.list.alternatenames.json", t)
defer data.Close()
const breweryID = "tNDKBY"
mux.HandleFunc("/brewery/", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
split := strings.Split(r.URL.Path, "/")
if split[3] != "alternatenames" {
t.Fatal("bad URL, expected \"/brewery/:breweryId/alternatenames\"")
}
if split[2] != breweryID {
http.Error(w, "invalid brewery ID", http.StatusNotFound)
}
io.Copy(w, data)
})
al, err := client.Brewery.ListAlternateNames(breweryID)
if err != nil {
t.Fatal(err)
}
if len(al) <= 0 {
t.Fatal("Expected >0 AlternateNames")
}
for _, alt := range al {
if alt.ID <= 0 {
t.Fatal("Expected ID >0")
}
}
testBadURL(t, func() error {
_, err := client.Brewery.ListAlternateNames(breweryID)
return err
})
}
func TestBreweryGetRandom(t *testing.T) {
setup()
defer teardown()
data, err := os.Open("test_data/brewery.get.random.json")
if err != nil {
t.Fatal("Failed to open test data file")
}
defer data.Close()
mux.HandleFunc("/brewery/random", func(w http.ResponseWriter, r *http.Request) {
checkMethod(t, r, "GET")
// TODO: check more request query values
checkFormValue(t, r, "established", "1983")
io.Copy(w, data)
})
b, err := client.Brewery.GetRandom(&RandomBreweryRequest{Established: "1983"})
if err != nil {
t.Fatal(err)
}
// Can't really verify specific information since it's a random brewery
if len(b.Name) <= 0 {
t.Fatal("Expected non-empty brewery name")
}
if len(b.ID) <= 0 {
t.Fatal("Expected non-empty brewery ID")
}
testBadURL(t, func() error {
_, err := client.Brewery.GetRandom(&RandomBreweryRequest{Established: "1983"})
return err
})
}
// Get all breweries established in 1983
func ExampleBreweryService_List() {
c := NewClient(os.Getenv("BREWERYDB_API_KEY"))
bl, err := c.Brewery.List(&BreweryListRequest{Established: "1983"})
if err != nil {
panic(err)
}
for _, b := range bl.Breweries {
fmt.Println(b.Name, b.ID)
}
}
// Get all information about brewery with given ID (Flying Dog)
func ExampleBreweryService_Get() {
c := NewClient(os.Getenv("BREWERYDB_API_KEY"))
b, err := c.Brewery.Get("jmGoBA")
if err != nil {
panic(err)
}
fmt.Println(b.Name)
fmt.Println(b.Description)
}
| [
"\"BREWERYDB_API_KEY\"",
"\"BREWERYDB_API_KEY\""
]
| []
| [
"BREWERYDB_API_KEY"
]
| [] | ["BREWERYDB_API_KEY"] | go | 1 | 0 | |
py/trash/005-2_agg_each_lgb_1.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 22:33:48 2019
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from sklearn.metrics import roc_auc_score
import utils, utils_cat
utils.start(__file__)
#==============================================================================
SEED = np.random.randint(9999)
print('SEED:', SEED)
DROP = [
# 'f002_EngineVersion', 'f002_AvSigVersion', 'f002_AppVersion',
#
# 'f003_AvSigVersion', 'f003_OsBuildLab', 'f003_Census_OSVersion',
# 'f003_date_min', 'f003_date_max'
]
NFOLD = 5
LOOP = 1
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.05,
'max_depth': -1,
'num_leaves': 2**6 -1,
'max_bin': 127,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.7,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
NROUND = 500
ESR = 50
VERBOSE_EVAL = 25
TRAIN_TH = 0.6
VALID_TH = 0.8
outpath_tr = '../data/train_f005_1.f'
outpath_te = '../data/test_f005_1.f'
# =============================================================================
# load
# =============================================================================
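# NOTE: only feature files 20-39 are aggregated in this script; the remaining slices are presumably covered by the sibling scripts in this series.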
files_tr = sorted(glob('../data/f005/train_f005*.f'))[20:40]
[print(i,f) for i,f in enumerate(files_tr)]
X_train = pd.concat([
pd.read_feather(f).sample(frac=0.5, random_state=SEED) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y_train = utils.load_target().sample(frac=0.5, random_state=SEED)['HasDetections']
if len(DROP)>0:
X_train.drop(DROP, axis=1, inplace=True)
#adv = pd.read_csv('../data/oof_802_adv.py.csv').iloc[:8921483].oof
#adv_th = adv.quantile(VALID_TH)
#
#X_valid = X_train[adv>adv.quantile(VALID_TH)]
#y_valid = y_train[adv>adv.quantile(VALID_TH)]
#
#X_train = X_train[adv<=adv.quantile(TRAIN_TH)]
#y_train = y_train[adv<=adv.quantile(TRAIN_TH)]
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
#print(f'X_valid.shape {X_valid.shape}')
gc.collect()
CAT = list( set(X_train.columns)&set(utils_cat.ALL))
print(f'CAT: {CAT}')
# =============================================================================
# hold out
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
categorical_feature=CAT,
free_raw_data=False)
#dvalid = lgb.Dataset(X_valid, y_valid.values,
# categorical_feature=CAT,
# free_raw_data=False)
gc.collect()
model = lgb.train(params=param, train_set=dtrain, num_boost_round=NROUND,
# valid_sets=[dtrain, dvalid],
# valid_names=['train','valid'],
# feval=ex.eval_auc,
categorical_feature=CAT,
# early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL)
imp = ex.getImp(model)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
# =============================================================================
#
# =============================================================================
imp = pd.read_csv('LOG/imp_005-2_agg_each_lgb_1.py.csv')
COL = imp.head(30).feature.tolist()
X_train = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)[COL]
X_train.to_feather(outpath_tr)
files_te = sorted(glob('../data/f005/test_f005*.f'))[20:40]
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
X_test.to_feather(outpath_te)
#==============================================================================
utils.end(__file__)
#utils.stop_instance()
| []
| []
| [
"USER"
]
| [] | ["USER"] | python | 1 | 0 | |
venv/Lib/site-packages/fitz/tools/setup_project.py | #! /usr/bin/env python
"""
Script to guide project setup.
The basic idea here is borrowed from the Sphinx project, and
in fact a significant amount of the code in this file was
borrowed from there too.
"""
import sys
import os
import time
import re
import os.path as op
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
codes = {}
def get_terminal_width():
"""Borrowed from the py lib."""
try:
import termios
import fcntl
import struct
call = fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('hhhh', 0, 0, 0, 0))
height, width = struct.unpack('hhhh', call)[:2]
terminal_width = width
except (SystemExit, KeyboardInterrupt):
raise
except:
# FALLBACK
terminal_width = int(os.environ.get('COLUMNS', 80)) - 1
return terminal_width
_tw = get_terminal_width()
def term_width_line(text):
if not codes:
# if no coloring, don't output fancy backspaces
return text + '\n'
else:
# codes are not displayed, this must be taken into account
return text.ljust(_tw + len(text) - len(_ansi_re.sub('', text))) + '\r'
def color_terminal():
if not hasattr(sys.stdout, 'isatty'):
return False
if not sys.stdout.isatty():
return False
if 'COLORTERM' in os.environ:
return True
term = os.environ.get('TERM', 'dumb').lower()
if term in ('xterm', 'linux') or 'color' in term:
return True
return False
def nocolor():
codes.clear()
def coloron():
codes.update(_orig_codes)
def colorize(name, text):
return codes.get(name, '') + text + codes.get('reset', '')
def create_color_func(name):
def inner(text):
return colorize(name, text)
globals()[name] = inner
_attrs = {
'reset': '39;49;00m',
'bold': '01m',
'faint': '02m',
'standout': '03m',
'underline': '04m',
'blink': '05m',
}
for _name, _value in _attrs.items():
codes[_name] = '\x1b[' + _value
_colors = [
('black', 'darkgray'),
('darkred', 'red'),
('darkgreen', 'green'),
('brown', 'yellow'),
('darkblue', 'blue'),
('purple', 'fuchsia'),
('turquoise', 'teal'),
('lightgray', 'white'),
]
for i, (dark, light) in enumerate(_colors):
codes[dark] = '\x1b[%im' % (i+30)
codes[light] = '\x1b[%i;01m' % (i+30)
_orig_codes = codes.copy()
for _name in codes:
create_color_func(_name)
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
PROMPT_PREFIX = "> "
PROJECT_CONF = """\
# -*- coding: utf-8 -*-
#
# Configuration file for %(project_name)s built on %(now)s
# Default experiment name
default_exp = %(default_exp)s
# Data directory is where the input data lives
data_dir = '%(data_dir)s'
# Analysis directory is where anything written by these scripts will live
analysis_dir = '%(analysis_dir)s'
# Working directory is where data lives during workflow execution
working_dir = '%(working_dir)s'
# Crash directory is where debugging info will be written if things go wrong
crash_dir = '%(crash_dir)s'
# Set this to True to remove the working directory after each excecution
rm_working_dir = %(rm_work_dir)s
"""
def mkdir_p(dir):
if not op.isdir(dir):
os.makedirs(dir)
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x):
if op.exists(x) and not op.isdir(x):
raise ValidationError("Please enter a valid path name.")
return x
def nonnull_string(s):
if s is not None:
return "'%s'" % s
def nonempty(x):
if not x:
raise ValidationError("Please enter some text.")
return x
def choice(*l):
def val(x):
if x not in l:
raise ValidationError('Please enter one of %s.' % ', '.join(l))
return x
return val
def boolean(x):
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError("Please enter either 'y' or 'n'.")
return x.upper() in ('Y', 'YES')
def suffix(x):
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError("Please enter a file suffix, "
"e.g. '.rst' or '.txt'.")
return x
def ok(x):
return x
def do_prompt(d, key, text, default=None, validator=nonempty):
while True:
if default:
prompt = blue(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = blue(PROMPT_PREFIX + text + ': ')
x = raw_input(prompt)
if default and not x:
x = default
if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
if TERM_ENCODING:
x = x.decode(TERM_ENCODING)
else:
print yellow('* Note: non-ASCII characters entered '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.')
try:
x = x.decode('utf-8')
except UnicodeDecodeError:
x = x.decode('latin1')
try:
x = validator(x)
except ValidationError, err:
print red('* ' + str(err))
continue
break
d[key] = x
def main(args):
d = dict()
if not color_terminal():
nocolor()
# Check if a project file already exists
if op.exists("project.py"):
# But let's make sure it's clean
try:
import project
clean_import = True
except Exception:
clean_import = False
import_notes = "" if clean_import else ", but it did not import cleanly"
# Maybe give a heads up about it
print red("Warning:"), """\
project.py file found in current directory%s.
Do you wish to generate a new project file?
(Note that you can always edit the existing file).
""" % import_notes
# And let the user choose whether to overwrite it
do_prompt(d, "overwrite", "Overwrite existing file? (y/N)",
"n", boolean)
if not d["overwrite"]:
print red("Aborting project setup.")
sys.exit(0)
os.remove("project.py")
# Now go through the prompted setup procedure
print bold("Let's set up your project.")
print '''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).
Please use relative paths.
'''
do_prompt(d, "project_name", "Project name")
do_prompt(d, "default_exp", "Default experiment", None, nonnull_string)
do_prompt(d, "data_dir", "Data tree path", "../data", is_path)
do_prompt(d, "analysis_dir", "Analysis tree path", "../analysis", is_path)
do_prompt(d, "working_dir", "Working tree path",
op.join(d['analysis_dir'], 'workingdir'), is_path)
crash_stem = "niypype-" + os.environ.get("LOGNAME", "-") + "-crashes"
do_prompt(d, "crash_dir", "Crashdump path",
op.join("../analysis", crash_stem))
do_prompt(d, "rm_work_dir", "Remove working directory after execution? (Y/n)",
"y", boolean)
# Record the time this happened
d['now'] = time.asctime()
# Write the project file
f = open("project.py", "w")
conf_text = PROJECT_CONF % d
f.write(conf_text.encode("utf-8"))
f.close()
# Create Data Directory
if not op.isdir(d['data_dir']):
os.makedirs(d['data_dir'])
if __name__ == "__main__":
main(sys.argv[1:])
| []
| []
| [
"TERM",
"LOGNAME",
"COLUMNS"
]
| [] | ["TERM", "LOGNAME", "COLUMNS"] | python | 3 | 0 | |
venonactl/cmd/cmdutils.go | package cmd
import (
"fmt"
"os"
"os/user"
"path"
"strings"
"github.com/codefresh-io/go-sdk/pkg/codefresh"
sdkUtils "github.com/codefresh-io/go-sdk/pkg/utils"
"github.com/codefresh-io/venona/venonactl/pkg/certs"
"github.com/codefresh-io/venona/venonactl/pkg/kube"
"github.com/codefresh-io/venona/venonactl/pkg/logger"
"github.com/codefresh-io/venona/venonactl/pkg/plugins"
"github.com/codefresh-io/venona/venonactl/pkg/store"
"github.com/olekukonko/tablewriter"
)
var (
version = "dev"
commit = "none"
date = "unknown"
// set to "false" by default; hack/build.sh switches it to "true"
// to prevent version checking during development
localDevFlow = "false"
verbose bool
configPath string
cfAPIHost string
cfAPIToken string
cfContext string
kubeConfigPath string
skipVerionCheck bool
)
func buildBasicStore(logger logger.Logger) {
s := store.GetStore()
s.Version = &store.Version{
Current: &store.CurrentVersion{
Version: version,
Commit: commit,
Date: date,
},
}
s.Image = &store.Image{
Name: "codefresh/venona",
}
s.Mode = store.ModeInCluster
s.ServerCert = &certs.ServerCert{}
s.AppName = store.ApplicationName
if skipVerionCheck || localDevFlow == "true" {
latestVersion := &store.LatestVersion{
Version: store.DefaultVersion,
IsDefault: true,
}
s.Version.Latest = latestVersion
logger.Debug("Skipping version check")
} else {
latestVersion := &store.LatestVersion{
Version: store.GetLatestVersion(logger),
IsDefault: false,
}
s.Image.Tag = latestVersion.Version
s.Version.Latest = latestVersion
res, _ := store.IsRunningLatestVersion()
// the local version and the latest version not match
// make sure the command is no venonactl version
if !res {
logger.Info("New version is avaliable, please update",
"Local-Version", s.Version.Current.Version,
"Latest-Version", s.Version.Latest.Version)
}
}
}
func extendStoreWithCodefershClient(logger logger.Logger) error {
s := store.GetStore()
if configPath == "" {
configPath = fmt.Sprintf("%s/.cfconfig", os.Getenv("HOME"))
}
if cfAPIHost == "" && cfAPIToken == "" {
context, err := sdkUtils.ReadAuthContext(configPath, cfContext)
if err != nil {
return err
}
cfAPIHost = context.URL
cfAPIToken = context.Token
logger.Debug("Using codefresh context", "Context-Name", context.Name, "Host", cfAPIHost)
} else {
logger.Debug("Reading creentials from environment variables")
if cfAPIHost == "" {
cfAPIHost = "https://g.codefresh.io"
}
}
logger.Debug("Creating codefresh client", "host", cfAPIHost, "token", cfAPIToken)
client := codefresh.New(&codefresh.ClientOptions{
Auth: codefresh.AuthOptions{
Token: cfAPIToken,
},
Host: cfAPIHost,
})
s.CodefreshAPI = &store.CodefreshAPI{
Host: cfAPIHost,
Token: cfAPIToken,
Client: client,
}
return nil
}
func extendStoreWithKubeClient(logger logger.Logger) {
s := store.GetStore()
if kubeConfigPath == "" {
currentUser, _ := user.Current()
if currentUser != nil {
kubeConfigPath = path.Join(currentUser.HomeDir, ".kube", "config")
logger.Debug("Path to kubeconfig not set, using default")
}
}
s.KubernetesAPI = &store.KubernetesAPI{
ConfigPath: kubeConfigPath,
}
}
func isUsingDefaultStorageClass(sc string) bool {
if sc == "" {
return true
}
return strings.HasPrefix(sc, plugins.DefaultStorageClassNamePrefix)
}
func dieOnError(err error) {
if err != nil {
fmt.Printf("Error: %s", err.Error())
os.Exit(1)
}
}
func createTable() *tablewriter.Table {
table := tablewriter.NewWriter(os.Stdout)
table.SetBorder(false)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
table.SetRowLine(false)
table.SetHeaderLine(false)
table.SetColumnSeparator(" ")
table.SetColWidth(100)
return table
}
func getKubeClientBuilder(context string, namespace string, path string, inCluster bool) kube.Kube {
return kube.New(&kube.Options{
ContextName: context,
Namespace: namespace,
PathToKubeConfig: path,
InCluster: inCluster,
})
}
func createLogger(command string, verbose bool) logger.Logger {
logFile := "venonalog.json"
os.Remove(logFile)
return logger.New(&logger.Options{
Command: command,
Verbose: verbose,
LogToFile: logFile,
})
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
src/integrationtest/java/infrastructure/SeleniumExtensions.java | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package infrastructure;
import labapi.FederationProvider;
import labapi.LabConstants;
import labapi.User;
import org.apache.commons.io.FileUtils;
import org.openqa.selenium.By;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.TimeoutException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.chrome.ChromeOptions;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.WebDriverWait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.concurrent.TimeUnit;
public class SeleniumExtensions {
private final static Logger LOG = LoggerFactory.getLogger(SeleniumExtensions.class);
private SeleniumExtensions() {
}
public static WebDriver createDefaultWebDriver() {
ChromeOptions options = new ChromeOptions();
//no visual rendering, remove when debugging
options.addArguments("--headless");
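//NOTE: assumes chromedriver.exe is installed at this fixed Windows path; adjust for other build agents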
System.setProperty("webdriver.chrome.driver", "C:/Windows/chromedriver.exe");
ChromeDriver driver = new ChromeDriver(options);
driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS);
return driver;
}
public static WebElement waitForElementToBeVisibleAndEnable(WebDriver driver, By by, int timeOutInSeconds) {
WebDriverWait webDriverWait = new WebDriverWait(driver, timeOutInSeconds);
return webDriverWait.until((dr) ->
{
try {
WebElement elementToBeDisplayed = driver.findElement(by);
if (elementToBeDisplayed.isDisplayed() && elementToBeDisplayed.isEnabled()) {
return elementToBeDisplayed;
}
return null;
} catch (StaleElementReferenceException e) {
return null;
}
});
}
public static WebElement waitForElementToBeVisibleAndEnable(WebDriver driver, By by) {
int DEFAULT_TIMEOUT_IN_SEC = 15;
return waitForElementToBeVisibleAndEnable(driver, by, DEFAULT_TIMEOUT_IN_SEC);
}
public static void performADLogin(WebDriver driver, User user) {
LOG.info("PerformADLogin");
UserInformationFields fields = new UserInformationFields(user);
LOG.info("Loggin in ... Entering username");
driver.findElement(new By.ById(fields.getAadUserNameInputId())).sendKeys(user.getUpn());
LOG.info("Loggin in ... Clicking <Next> after username");
driver.findElement(new By.ById(fields.getAadSignInButtonId())).click();
if (user.getFederationProvider() == FederationProvider.ADFS_2 &&
!user.getLabName().equals(LabConstants.ARLINGTON_LAB_NAME)) {
LOG.info("Loggin in ... ADFS-V2 - Entering the username in ADFSv2 form");
driver.findElement(new By.ById(SeleniumConstants.ADFSV2_WEB_USERNAME_INPUT_ID)).
sendKeys(user.getUpn());
}
LOG.info("Loggin in ... Entering password");
By by = new By.ById(fields.getPasswordInputId());
waitForElementToBeVisibleAndEnable(driver, by).sendKeys(user.getPassword());
LOG.info("Loggin in ... click submit");
waitForElementToBeVisibleAndEnable(driver, new By.ById(fields.getPasswordSigInButtonId())).
click();
try {
checkAuthenticationCompletePage(driver);
return;
} catch (TimeoutException ex) {
}
LOG.info("Checking optional questions");
try {
LOG.info("Are you trying to sign in to ... ? checking");
waitForElementToBeVisibleAndEnable(driver, new By.ById(SeleniumConstants.ARE_YOU_TRYING_TO_SIGN_IN_TO), 3).
click();
LOG.info("Are you trying to sign in to ... ? click Continue");
} catch (TimeoutException ex) {
}
try {
LOG.info("Stay signed in? checking");
waitForElementToBeVisibleAndEnable(driver, new By.ById(SeleniumConstants.STAY_SIGN_IN_NO_BUTTON_ID), 3).
click();
LOG.info("Stay signed in? click NO");
} catch (TimeoutException ex) {
}
}
private static void checkAuthenticationCompletePage(WebDriver driver) {
(new WebDriverWait(driver, 5)).until((ExpectedCondition<Boolean>) d -> {
boolean condition = false;
WebElement we = d.findElement(new By.ByTagName("body"));
if (we != null && we.getText().contains("Authentication complete")) {
condition = true;
}
return condition;
});
}
public static void performADFS2019Login(WebDriver driver, User user) {
LOG.info("PerformADFS2019Login");
UserInformationFields fields = new UserInformationFields(user);
LOG.info("Loggin in ... Entering username");
driver.findElement(new By.ById(fields.getADFS2019UserNameInputId())).sendKeys(user.getUpn());
LOG.info("Loggin in ... Entering password");
By by = new By.ById(fields.getPasswordInputId());
waitForElementToBeVisibleAndEnable(driver, by).sendKeys(user.getPassword());
LOG.info("Loggin in ... click submit");
waitForElementToBeVisibleAndEnable(driver, new By.ById(fields.getPasswordSigInButtonId())).
click();
}
public static void performLocalLogin(WebDriver driver, User user) {
LOG.info("PerformLocalLogin");
driver.findElement(new By.ById(SeleniumConstants.B2C_LOCAL_ACCOUNT_ID)).click();
LOG.info("Loggin in ... Entering username");
driver.findElement(new By.ById(SeleniumConstants.B2C_LOCAL_USERNAME_ID)).sendKeys(user.getUpn());
LOG.info("Loggin in ... Entering password");
By by = new By.ById(SeleniumConstants.B2C_LOCAL_PASSWORD_ID);
waitForElementToBeVisibleAndEnable(driver, by).sendKeys(user.getPassword());
waitForElementToBeVisibleAndEnable(driver, new By.ById(SeleniumConstants.B2C_LOCAL_SIGN_IN_BUTTON_ID)).
click();
}
public static void performGoogleLogin(WebDriver driver, User user) {
LOG.info("PerformGoogleLogin");
driver.findElement(new By.ById(SeleniumConstants.GOOGLE_ACCOUNT_ID)).click();
LOG.info("Loggin in ... Entering username");
driver.findElement(new By.ById(SeleniumConstants.GOOGLE_USERNAME_ID)).sendKeys(user.getUpn());
LOG.info("Loggin in ... Clicking <Next> after username");
driver.findElement(new By.ById(SeleniumConstants.GOOGLE_NEXT_AFTER_USERNAME_BUTTON)).click();
LOG.info("Loggin in ... Entering password");
By by = new By.ByName(SeleniumConstants.GOOGLE_PASSWORD_ID);
waitForElementToBeVisibleAndEnable(driver, by).sendKeys(user.getPassword());
LOG.info("Loggin in ... click submit");
waitForElementToBeVisibleAndEnable(driver, new By.ById(SeleniumConstants.GOOGLE_NEXT_BUTTON_ID)).click();
}
public static void performFacebookLogin(WebDriver driver, User user) {
LOG.info("PerformFacebookLogin");
driver.findElement(new By.ById(SeleniumConstants.FACEBOOK_ACCOUNT_ID)).click();
LOG.info("Loggin in ... Entering username");
driver.findElement(new By.ById(SeleniumConstants.FACEBOOK_USERNAME_ID)).sendKeys(user.getUpn());
LOG.info("Loggin in ... Entering password");
By by = new By.ById(SeleniumConstants.FACEBOOK_PASSWORD_ID);
waitForElementToBeVisibleAndEnable(driver, by).sendKeys(user.getPassword());
waitForElementToBeVisibleAndEnable(driver, new By.ById(SeleniumConstants.FACEBOOK_LOGIN_BUTTON_ID)).
click();
}
public static void takeScreenShot(WebDriver driver) {
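// BUILD_STAGINGDIRECTORY is set by Azure Pipelines; writing the screenshot there lets the build publish it as an artifact.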
String file = System.getenv("BUILD_STAGINGDIRECTORY");
File destination = new File(file + "/SeleniumError.png");
File scrFile = ((TakesScreenshot) driver).getScreenshotAs(OutputType.FILE);
try {
FileUtils.copyFile(scrFile, destination);
LOG.info("Screenshot can be found at: " + destination.getPath());
} catch (Exception exception) {
LOG.error("Error taking screenshot: " + exception.getMessage());
}
}
}
| [
"\"BUILD_STAGINGDIRECTORY\""
]
| []
| [
"BUILD_STAGINGDIRECTORY"
]
| [] | ["BUILD_STAGINGDIRECTORY"] | java | 1 | 0 | |
eclipse-mosquitto/test/lib/03-publish-c2b-qos1-timeout.py | #!/usr/bin/env python3
# Test whether a client sends a correct PUBLISH to a topic with QoS 1 and responds to a delay.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id publish-qos1-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK the client should verify that rc==0. If not, it should exit with
# return code=1.
# On a successful CONNACK, the client should send a PUBLISH message with topic
# "pub/qos1/test", payload "message" and QoS=1.
# The test will not respond to the first PUBLISH message, so the client must
# resend the PUBLISH message with dup=1. Note that to keep test durations low, a
# message retry timeout of less than 10 seconds is required for this test.
# On receiving the second PUBLISH message, the test will send the correct
# PUBACK response. On receiving the correct PUBACK response, the client should
# send a DISCONNECT message.
from mosq_test_helper import *
port = mosq_test.get_lib_port()
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("publish-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
publish_packet = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message")
publish_packet_dup = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message", dup=True)
puback_packet = mosq_test.gen_puback(mid)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', port))
sock.listen(5)
client_args = sys.argv[1:]
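# Point the client under test at the in-tree library builds rather than any system-installed copies.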
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env, port=port)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
mosq_test.do_receive_send(conn, connect_packet, connack_packet, "connect")
mosq_test.expect_packet(conn, "publish", publish_packet)
# Delay for > 3 seconds (message retry time)
mosq_test.do_receive_send(conn, publish_packet_dup, puback_packet, "dup publish")
mosq_test.expect_packet(conn, "disconnect", disconnect_packet)
rc = 0
conn.close()
except mosq_test.TestError:
pass
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
portapp/wsgi.py | """
WSGI config for portapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "portapp.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/surveyext/editor.go | package surveyext
// This file extends survey.Editor to give it more flexible behavior. For more context, read
// https://github.com/cli/cli/issues/70
// To see what we extended, search through for EXTENDED comments.
import (
"os"
"path/filepath"
"runtime"
"github.com/AlecAivazis/survey/v2"
"github.com/AlecAivazis/survey/v2/terminal"
)
var (
bom = []byte{0xef, 0xbb, 0xbf}
defaultEditor = "nano" // EXTENDED to switch from vim as a default editor
)
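// The default editor is resolved at init time: notepad on Windows, otherwise the first of GIT_EDITOR, VISUAL, or EDITOR that is set, falling back to nano.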
func init() {
if runtime.GOOS == "windows" {
defaultEditor = "notepad"
} else if g := os.Getenv("GIT_EDITOR"); g != "" {
defaultEditor = g
} else if v := os.Getenv("VISUAL"); v != "" {
defaultEditor = v
} else if e := os.Getenv("EDITOR"); e != "" {
defaultEditor = e
}
}
// EXTENDED to enable different prompting behavior
type GhEditor struct {
*survey.Editor
EditorCommand string
BlankAllowed bool
}
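// Illustrative usage (a sketch, not part of the original file): a skippable
// body prompt could be built roughly as
//
//	e := &GhEditor{
//		Editor:        &survey.Editor{Message: "Body", FileName: "*.md"},
//		EditorCommand: "vim",
//		BlankAllowed:  true,
//	}
//	var body string
//	err := survey.AskOne(e, &body)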
func (e *GhEditor) editorCommand() string {
if e.EditorCommand == "" {
return defaultEditor
}
return e.EditorCommand
}
// EXTENDED to change prompt text
var EditorQuestionTemplate = `
{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}}
{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}}
{{- color "default+hb"}}{{ .Message }} {{color "reset"}}
{{- if .ShowAnswer}}
{{- color "cyan"}}{{.Answer}}{{color "reset"}}{{"\n"}}
{{- else }}
{{- if and .Help (not .ShowHelp)}}{{color "cyan"}}[{{ .Config.HelpInput }} for help]{{color "reset"}} {{end}}
{{- if and .Default (not .HideDefault)}}{{color "white"}}({{.Default}}) {{color "reset"}}{{end}}
{{- color "cyan"}}[(e) to launch {{ .EditorCommand }}{{- if .BlankAllowed }}, enter to skip{{ end }}] {{color "reset"}}
{{- end}}`
// EXTENDED to pass editor name (to use in prompt)
type EditorTemplateData struct {
survey.Editor
EditorCommand string
BlankAllowed bool
Answer string
ShowAnswer bool
ShowHelp bool
Config *survey.PromptConfig
}
// EXTENDED to augment prompt text and keypress handling
func (e *GhEditor) prompt(initialValue string, config *survey.PromptConfig) (interface{}, error) {
err := e.Render(
EditorQuestionTemplate,
// EXTENDED to support printing editor in prompt and BlankAllowed
EditorTemplateData{
Editor: *e.Editor,
BlankAllowed: e.BlankAllowed,
EditorCommand: filepath.Base(e.editorCommand()),
Config: config,
},
)
if err != nil {
return "", err
}
// start reading runes from the standard in
rr := e.NewRuneReader()
_ = rr.SetTermMode()
defer func() { _ = rr.RestoreTermMode() }()
cursor := e.NewCursor()
cursor.Hide()
defer cursor.Show()
for {
// EXTENDED to handle the e to edit / enter to skip behavior + BlankAllowed
r, _, err := rr.ReadRune()
if err != nil {
return "", err
}
if r == 'e' {
break
}
if r == '\r' || r == '\n' {
if e.BlankAllowed {
return "", nil
} else {
continue
}
}
if r == terminal.KeyInterrupt {
return "", terminal.InterruptErr
}
if r == terminal.KeyEndTransmission {
break
}
if string(r) == config.HelpInput && e.Help != "" {
err = e.Render(
EditorQuestionTemplate,
EditorTemplateData{
// EXTENDED to support printing editor in prompt, BlankAllowed
Editor: *e.Editor,
BlankAllowed: e.BlankAllowed,
EditorCommand: filepath.Base(e.editorCommand()),
ShowHelp: true,
Config: config,
},
)
if err != nil {
return "", err
}
}
continue
}
stdio := e.Stdio()
text, err := Edit(e.editorCommand(), e.FileName, initialValue, stdio.In, stdio.Out, stdio.Err, cursor)
if err != nil {
return "", err
}
// check length, return default value on empty
if len(text) == 0 && !e.AppendDefault {
return e.Default, nil
}
return text, nil
}
// EXTENDED This is straight copypasta from survey to get our overridden prompt called.
func (e *GhEditor) Prompt(config *survey.PromptConfig) (interface{}, error) {
initialValue := ""
if e.Default != "" && e.AppendDefault {
initialValue = e.Default
}
return e.prompt(initialValue, config)
}
func DefaultEditorName() string {
return filepath.Base(defaultEditor)
}
| [
"\"GIT_EDITOR\"",
"\"VISUAL\"",
"\"EDITOR\""
]
| []
| [
"VISUAL",
"EDITOR",
"GIT_EDITOR"
]
| [] | ["VISUAL", "EDITOR", "GIT_EDITOR"] | go | 3 | 0 | |
Beam/go/vendor/go.uber.org/zap/testutils/timeout.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package testutils provides some simple testing helpers (most of which aren't
// specifically logging-related).
package testutils
import (
"log"
"os"
"strconv"
"time"
)
var _timeoutScale = 1.0
// Timeout scales the provided duration by $TEST_TIMEOUT_SCALE.
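// For example, with TEST_TIMEOUT_SCALE=2, Timeout(time.Second) returns two seconds.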
func Timeout(base time.Duration) time.Duration {
return time.Duration(float64(base) * _timeoutScale)
}
// Sleep scales the sleep duration by $TEST_TIMEOUT_SCALE.
func Sleep(base time.Duration) {
time.Sleep(Timeout(base))
}
func init() {
if v := os.Getenv("TEST_TIMEOUT_SCALE"); v != "" {
fv, err := strconv.ParseFloat(v, 64)
if err != nil {
panic(err)
}
_timeoutScale = fv
log.Printf("Scaling timeouts by %vx.\n", _timeoutScale)
}
}
| [
"\"TEST_TIMEOUT_SCALE\""
]
| []
| [
"TEST_TIMEOUT_SCALE"
]
| [] | ["TEST_TIMEOUT_SCALE"] | go | 1 | 0 | |
ticketbuy_project/ticketbuy_project/asgi.py | """
ASGI config for ticketbuy_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ticketbuy_project.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
augur/cli/backend.py | #SPDX-License-Identifier: MIT
"""
Augur library commands for controlling the backend components
"""
from copy import deepcopy
import os, time, atexit, subprocess, click, logging, sys
import psutil
import signal
import multiprocessing as mp
import gunicorn.app.base
from gunicorn.arbiter import Arbiter
from augur.cli import initialize_logging, pass_config, pass_application
from augur.housekeeper import Housekeeper
from augur.server import Server
from augur.application import Application
from augur.gunicorn import AugurGunicornApp
logger = logging.getLogger("augur")
@click.group('server', short_help='Commands for controlling the backend API server & data collection workers')
def cli():
pass
@cli.command("start")
@click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper")
@click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts")
def start(disable_housekeeper, skip_cleanup):
"""
Start Augur's backend server
"""
augur_app = Application()
logger.info("Augur application initialized")
logger.info(f"Using config file: {augur_app.config.config_file_location}")
if not skip_cleanup:
logger.debug("Cleaning up old Augur processes...")
_broadcast_signal_to_processes()
time.sleep(2)
else:
logger.debug("Skipping process cleanup")
master = initialize_components(augur_app, disable_housekeeper)
logger.info('Starting Gunicorn webserver...')
logger.info(f"Augur is running at: http://0.0.0.0:5000")
logger.info("Gunicorn server logs & errors will be written to logs/gunicorn.log")
logger.info('Housekeeper update process logs will now take over.')
Arbiter(master).run()
@cli.command('stop')
@initialize_logging
def stop():
"""
Sends SIGTERM to all Augur server & worker processes
"""
_broadcast_signal_to_processes(given_logger=logging.getLogger("augur.cli"))
@cli.command('kill')
@initialize_logging
def kill():
"""
Sends SIGKILL to all Augur server & worker processes
"""
_broadcast_signal_to_processes(signal=signal.SIGKILL, given_logger=logging.getLogger("augur.cli"))
@cli.command('processes')
@initialize_logging
def processes():
"""
Outputs the name/PID of all Augur server & worker processes
"""
logger = logging.getLogger("augur.cli")
processes = get_augur_processes()
for process in processes:
logger.info(f"Found process {process.pid}")
def get_augur_processes():
processes = []
for process in psutil.process_iter(['cmdline', 'name', 'environ']):
if process.info['cmdline'] is not None and process.info['environ'] is not None:
try:
if os.getenv('VIRTUAL_ENV') in process.info['environ']['VIRTUAL_ENV'] and 'python' in ''.join(process.info['cmdline'][:]).lower():
if process.pid != os.getpid():
processes.append(process)
except KeyError:
pass
return processes
def _broadcast_signal_to_processes(signal=signal.SIGTERM, given_logger=None):
if given_logger is None:
_logger = logger
else:
_logger = given_logger
processes = get_augur_processes()
if processes != []:
for process in processes:
if process.pid != os.getpid():
logger.info(f"Stopping process {process.pid}")
try:
process.send_signal(signal)
except psutil.NoSuchProcess as e:
pass
def initialize_components(augur_app, disable_housekeeper):
master = None
manager = None
broker = None
housekeeper = None
worker_processes = []
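# 'forkserver' keeps worker processes from inheriting the parent's open sockets and database connections.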
mp.set_start_method('forkserver', force=True)
if not disable_housekeeper:
manager = mp.Manager()
broker = manager.dict()
housekeeper = Housekeeper(broker=broker, augur_app=augur_app)
controller = augur_app.config.get_section('Workers')
for worker in controller.keys():
if controller[worker]['switch']:
for i in range(controller[worker]['workers']):
logger.info("Booting {} #{}".format(worker, i + 1))
worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True)
worker_processes.append(worker_process)
worker_process.start()
augur_app.manager = manager
augur_app.broker = broker
augur_app.housekeeper = housekeeper
atexit._clear()
atexit.register(exit, augur_app, worker_processes, master)
return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app)
def worker_start(worker_name=None, instance_number=0, worker_port=None):
try:
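# Stagger start-up: each additional instance waits 30s longer so the workers do not all boot at once.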
time.sleep(30 * instance_number)
destination = subprocess.DEVNULL
process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT)
logger.info("{} #{} booted.".format(worker_name,instance_number+1))
except KeyboardInterrupt as e:
pass
def exit(augur_app, worker_processes, master):
logger.info("Shutdown started for this Gunicorn worker...")
augur_app.shutdown()
if worker_processes:
for process in worker_processes:
logger.debug("Shutting down worker process with pid: {}...".format(process.pid))
process.terminate()
if master is not None:
logger.debug("Shutting down Gunicorn server")
master.halt()
logger.info("Shutdown complete")
sys.exit(0)
| []
| []
| [
"VIRTUAL_ENV"
]
| [] | ["VIRTUAL_ENV"] | python | 1 | 0 | |
plugins/openstack/openstack_common.py | import os
from common import (
helpers,
)
OST_PROJECTS = ["aodh",
"barbican",
"ceilometer",
"cinder",
"designate",
"glance",
"gnocchi",
"heat",
"horizon",
"keystone",
"neutron",
"nova",
"octavia",
"swift",
]
SVC_VALID_SUFFIX = r"[0-9a-zA-Z-_]*[^:/]?"
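# Optional service-name suffix (e.g. "-api", "-scheduler") that stops short of ':' or '/' separators.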
# TODO: keep this list up-to-date with services we care about in the context of
# openstack.
OST_SERVICES = [r"aodh{}".format(SVC_VALID_SUFFIX),
r"barbican{}".format(SVC_VALID_SUFFIX),
r"ceilometer{}".format(SVC_VALID_SUFFIX),
r"cinder{}".format(SVC_VALID_SUFFIX),
r"designate{}".format(SVC_VALID_SUFFIX),
r"glance{}".format(SVC_VALID_SUFFIX),
r"gnocchi{}".format(SVC_VALID_SUFFIX),
r"heat{}".format(SVC_VALID_SUFFIX),
r"horizon",
r"keystone{}".format(SVC_VALID_SUFFIX),
r"neutron{}".format(SVC_VALID_SUFFIX),
r"nova{}".format(SVC_VALID_SUFFIX),
r"octavia{}".format(SVC_VALID_SUFFIX),
r"swift{}".format(SVC_VALID_SUFFIX),
]
# Services that are not actually openstack projects but are used by them
OST_SERVICES_DEPS = [r"apache{}".format(SVC_VALID_SUFFIX),
r"beam.smp",
r"dnsmasq",
r"haproxy",
r"keepalived{}".format(SVC_VALID_SUFFIX),
r"mysqld",
r"ovs{}".format(SVC_VALID_SUFFIX),
r"ovn{}".format(SVC_VALID_SUFFIX),
r"rabbitmq-server",
r"vault{}".format(SVC_VALID_SUFFIX),
r"qemu-system-\S+",
]
OST_DEP_PKGS = [r"conntrack",
r"dnsmasq",
r"haproxy",
r"keepalived",
r"libc-bin",
r"libvirt-daemon",
r"libvirt-bin",
r"python3?-oslo[.-]",
r"openvswitch-switch",
r"ovn",
r"qemu-kvm",
r"rabbitmq-server",
]
CINDER_LOGS = "var/log/cinder"
GLANCE_LOGS = "var/log/glance"
HEAT_LOGS = "var/log/heat"
KEYSTONE_LOGS = "var/log/keystone"
NEUTRON_LOGS = "var/log/neutron"
NOVA_LOGS = "var/log/nova"
OCTAVIA_LOGS = "var/log/octavia"
OPENSTACK_AGENT_ERROR_KEY_BY_TIME = \
helpers.bool_str(os.environ.get('OPENSTACK_AGENT_ERROR_KEY_BY_TIME',
"False"))
OPENSTACK_SHOW_CPU_PINNING_RESULTS = \
helpers.bool_str(os.environ.get('OPENSTACK_SHOW_CPU_PINNING_RESULTS',
"False"))
| []
| []
| [
"OPENSTACK_SHOW_CPU_PINNING_RESULTS",
"OPENSTACK_AGENT_ERROR_KEY_BY_TIME"
]
| [] | ["OPENSTACK_SHOW_CPU_PINNING_RESULTS", "OPENSTACK_AGENT_ERROR_KEY_BY_TIME"] | python | 2 | 0 | |
step3/src/main.go | package main
import (
"database/sql"
"fmt"
"html/template"
"log"
"net/http"
"os"
_ "github.com/go-sql-driver/mysql"
)
var db *sql.DB
// WordCount corresponds to the column of word_tb
type WordCount struct {
Word string `db:"word"`
Count int `db:"count"`
}
// Output is the data to pass to template file "index.html"
type Output struct {
Input string
WordCounts []WordCount
}
func getWords() (wordCounts []WordCount, err error) {
rows, err := db.Query("SELECT word, count FROM word_tb ")
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var wordCount WordCount
if err = rows.Scan(&wordCount.Word, &wordCount.Count); err != nil {
return nil, err
}
wordCounts = append(wordCounts, wordCount)
}
return wordCounts, rows.Err()
}
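// updateWords increments the count for word, inserting a new row the first time it is seen.
// NOTE: the SELECT-then-write sequence is not atomic; with a UNIQUE index on word, an
// INSERT ... ON DUPLICATE KEY UPDATE would avoid a race between concurrent requests.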
func updateWords(word string) error {
stmtSel, err := db.Prepare("SELECT word FROM word_tb WHERE word = ?")
if err != nil {
return err
}
defer stmtSel.Close()
rows, err := stmtSel.Query(word)
// If rows exists, the word is already in word_tb.
// If not, the word needs to be inserted to word_tb.
if rows.Next() {
stmtUp, err := db.Prepare("UPDATE word_tb SET count = count + 1 WHERE word = ?")
if err != nil {
return err
}
defer stmtUp.Close()
_, err = stmtUp.Exec(word)
if err != nil {
return err
}
} else {
stmtIn, err := db.Prepare("INSERT INTO word_tb (word, count) VALUES (?, 1)")
if err != nil {
return err
}
defer stmtIn.Close()
_, err = stmtIn.Exec(word)
if err != nil {
return err
}
}
return nil
}
func mainPage(w http.ResponseWriter, r *http.Request) {
var input string
// If http method is POST, the database needs to be updated.
if r.Method == http.MethodPost {
err := r.ParseForm()
if err != nil {
log.Println(err)
}
input = r.FormValue("word")
if input != "" {
err = updateWords(input)
if err != nil {
log.Println(err)
fmt.Fprintf(w, err.Error())
return
}
}
}
wordCounts, err := getWords()
if err != nil {
log.Println(err)
fmt.Fprintf(w, err.Error())
return
}
// html/template already has xss countermeasure function
tpl := template.Must(template.ParseFiles("template/index.html"))
output := Output{
Input: input,
WordCounts: wordCounts,
}
tpl.Execute(w, output)
}
func main() {
db, _ = sql.Open("mysql", os.Getenv("MYSQL_CONNECTION"))
defer db.Close()
// checking DB connection
err := db.Ping()
if err != nil {
log.Fatal(err)
}
http.HandleFunc("/", mainPage)
log.Fatal(http.ListenAndServe(":8080", nil))
}
| [
"\"MYSQL_CONNECTION\""
]
| []
| [
"MYSQL_CONNECTION"
]
| [] | ["MYSQL_CONNECTION"] | go | 1 | 0 | |
deep_rl/utils/torch_utils.py | #######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from .config import *
import torch
import os
import math
import functools
from collections import OrderedDict
from torchmeta.modules import MetaModule, MetaSequential, MetaLinear
# from torchmeta.modules.utils import get_subdict
import torch.nn as nn
def select_device(gpu_id):
# if torch.cuda.is_available() and gpu_id >= 0:
if gpu_id >= 0:
Config.DEVICE = torch.device('cuda:%d' % (gpu_id))
else:
Config.DEVICE = torch.device('cpu')
def tensor(x):
if isinstance(x, torch.Tensor):
return x
x = np.asarray(x, dtype=np.float)
x = torch.tensor(x, device=Config.DEVICE, dtype=torch.float32)
return x
def range_tensor(end):
return torch.arange(end).long().to(Config.DEVICE)
def to_np(t):
return t.cpu().detach().numpy()
def random_seed(seed=None):
np.random.seed(seed)
torch.manual_seed(np.random.randint(int(1e6)))
def set_one_thread():
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
torch.set_num_threads(1)
def huber(x, k=1.0):
return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
def epsilon_greedy(epsilon, x):
if len(x.shape) == 1:
return np.random.randint(len(x)) if np.random.rand() < epsilon else np.argmax(x)
elif len(x.shape) == 2:
random_actions = np.random.randint(x.shape[1], size=x.shape[0])
greedy_actions = np.argmax(x, axis=-1)
dice = np.random.rand(x.shape[0])
return np.where(dice < epsilon, random_actions, greedy_actions)
def sync_grad(target_network, src_network):
for param, src_param in zip(target_network.parameters(), src_network.parameters()):
if src_param.grad is not None:
param._grad = src_param.grad.clone()
# adapted from https://github.com/pytorch/pytorch/issues/12160
def batch_diagonal(input):
# idea from here: https://discuss.pytorch.org/t/batch-of-diagonal-matrix/13560
# batches a stack of vectors (batch x N) -> a stack of diagonal matrices (batch x N x N)
# works in 2D -> 3D, should also work in higher dimensions
# make a zero matrix, which duplicates the last dim of input
dims = input.size()
dims = dims + dims[-1:]
output = torch.zeros(dims, device=input.device)
# stride across the first dimensions, add one to get the diagonal of the last dimension
strides = [output.stride(i) for i in range(input.dim() - 1)]
strides.append(output.size(-1) + 1)
# stride and copy the input to the diagonal
output.as_strided(input.size(), strides).copy_(input)
return output
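# Minimal usage sketch (illustrative only):
#   >>> v = torch.arange(6.).view(2, 3)     # a batch of 2 vectors of length 3
#   >>> batch_diagonal(v).shape
#   torch.Size([2, 3, 3])
# Each output[i] equals torch.diag(v[i]); as_strided exposes the diagonals of the zero
# matrix as a (batch x N) view, and copy_ writes the input vectors onto them in place.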
def batch_trace(input):
i = range_tensor(input.size(-1))
t = input[:, i, i].sum(-1).unsqueeze(-1).unsqueeze(-1)
return t
class DiagonalNormal:
def __init__(self, mean, std):
self.dist = torch.distributions.Normal(mean, std)
self.sample = self.dist.sample
def log_prob(self, action):
return self.dist.log_prob(action).sum(-1).unsqueeze(-1)
def entropy(self):
return self.dist.entropy().sum(-1).unsqueeze(-1)
def cdf(self, action):
return self.dist.cdf(action).prod(-1).unsqueeze(-1)
class BatchCategorical:
def __init__(self, logits):
self.pre_shape = logits.size()[:-1]
logits = logits.view(-1, logits.size(-1))
self.dist = torch.distributions.Categorical(logits=logits)
def log_prob(self, action):
log_pi = self.dist.log_prob(action.view(-1))
log_pi = log_pi.view(action.size()[:-1] + (-1,))
return log_pi
def entropy(self):
ent = self.dist.entropy()
ent = ent.view(self.pre_shape + (-1,))
return ent
def sample(self, sample_shape=torch.Size([])):
ret = self.dist.sample(sample_shape)
ret = ret.view(sample_shape + self.pre_shape + (-1,))
return ret
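# Shape convention sketch (illustrative): logits of shape (batch, n_actions) yield
# log_prob/entropy/sample outputs of shape (batch, 1), mirroring the trailing singleton
# dimension used by DiagonalNormal above.
#   >>> dist = BatchCategorical(torch.zeros(4, 3))
#   >>> dist.sample().shape, dist.entropy().shape
#   (torch.Size([4, 1]), torch.Size([4, 1]))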
class Grad:
def __init__(self, network=None, grads=None):
if grads is not None:
self.grads = grads
else:
self.grads = []
for param in network.parameters():
self.grads.append(torch.zeros(param.data.size(), device=Config.DEVICE))
def add(self, op):
if isinstance(op, Grad):
for grad, op_grad in zip(self.grads, op.grads):
grad.add_(op_grad)
elif isinstance(op, torch.nn.Module):
for grad, param in zip(self.grads, op.parameters()):
if param.grad is not None:
grad.add_(param.grad)
return self
def mul(self, coef):
for grad in self.grads:
grad.mul_(coef)
return self
def assign(self, network):
for grad, param in zip(self.grads, network.parameters()):
param._grad = grad.clone()
def zero(self):
for grad in self.grads:
grad.zero_()
def clone(self):
return Grad(grads=[grad.clone() for grad in self.grads])
class Grads:
def __init__(self, network=None, n=0, grads=None):
if grads is not None:
self.grads = grads
else:
self.grads = [Grad(network) for _ in range(n)]
def clone(self):
return Grads(grads=[grad.clone() for grad in self.grads])
def mul(self, op):
if np.isscalar(op):
for grad in self.grads:
grad.mul(op)
elif isinstance(op, torch.Tensor):
op = op.view(-1)
for i, grad in enumerate(self.grads):
grad.mul(op[i])
else:
raise NotImplementedError
return self
def add(self, op):
if np.isscalar(op):
for grad in self.grads:
grad.mul(op)
elif isinstance(op, Grads):
for grad, op_grad in zip(self.grads, op.grads):
grad.add(op_grad)
elif isinstance(op, torch.Tensor):
op = op.view(-1)
for i, grad in enumerate(self.grads):
grad.mul(op[i])
else:
raise NotImplementedError
return self
def mean(self):
grad = self.grads[0].clone()
grad.zero()
for g in self.grads:
grad.add(g)
grad.mul(1 / len(self.grads))
return grad
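# Sketch of the intended workflow (assumed usage, not from the original file): collect one
# Grad per sample, average them, and write the result back into the network before a step.
#   grads = Grads(network, n=batch_size)
#   for i in range(batch_size):
#       network.zero_grad()
#       losses[i].backward(retain_graph=True)
#       grads.grads[i].add(network)
#   grads.mean().assign(network)
#   optimizer.step()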
def escape_float(x):
    return ('%s' % x).replace('.', r'\.')
class MetaTensor(MetaModule):
def __init__(self, data):
super(MetaTensor, self).__init__()
self.meta_tensor = nn.Parameter(data, requires_grad=True)
    def forward(self, params=None):
if params is None:
params = OrderedDict(self.named_parameters())
return params.get('meta_tensor', None) | []
| []
| [
"MKL_NUM_THREADS",
"OMP_NUM_THREADS"
]
| [] | ["MKL_NUM_THREADS", "OMP_NUM_THREADS"] | python | 2 | 0 | |
pilot/docker_client.go | package pilot
import (
"context"
"fmt"
"io"
"os"
"strings"
"time"
dockertypes "github.com/docker/docker/api/types"
dockerevents "github.com/docker/docker/api/types/events"
dockerfilters "github.com/docker/docker/api/types/filters"
dockerclient "github.com/docker/docker/client"
log "github.com/sirupsen/logrus"
)
type dockerClientFactory struct{}
var _ ClientFactory = &dockerClientFactory{}
// NewClient new docker client by factory
func (f *dockerClientFactory) NewClient(endpoint string, timeout time.Duration) (c Client, err error) {
if os.Getenv("DOCKER_API_VERSION") == "" {
os.Setenv("DOCKER_API_VERSION", "1.23")
}
var (
client *dockerclient.Client
)
client, err = dockerclient.NewClientWithOpts(dockerclient.FromEnv)
if err != nil {
return nil, err
}
	c = &dockerClientImpl{
		client:      client,
		stopWatcher: make(chan error),
	}
return
}
var _ Client = &dockerClientImpl{}
type dockerClientImpl struct {
client *dockerclient.Client
stopWatcher chan error
}
func (c *dockerClientImpl) processEvent(msg dockerevents.Message, eventsChan chan ContainerEventMessage) error {
// Deliver the event by action type.
if msg.Action == "start" || msg.Action == "restart" {
eventsChan <- ContainerEventMessage{
ID: msg.ID,
ContainerID: msg.Actor.ID,
Action: CONTAINER_ACTION_START,
}
return nil
}
// Increase the monitoring of container Exit events and repair the log duplicate collection caused by the failure to delete the exited container in time
if msg.Action == "destroy" || msg.Action == "die" {
eventsChan <- ContainerEventMessage{
ID: msg.ID,
ContainerID: msg.Actor.ID,
Action: CONTAINER_ACTION_STOP,
}
return nil
}
// ignored.
return nil
}
// Start starts an event watcher for docker events
func (c *dockerClientImpl) Start(eventsChan chan ContainerEventMessage, errsChan chan error) error {
ctx := context.Background()
filter := dockerfilters.NewArgs()
filter.Add("type", "container")
options := dockertypes.EventsOptions{
Filters: filter,
}
dockerMsgs, dockerErrs := c.client.Events(ctx, options)
// Process the events received from the docker daemon,
// Stop while receive signal from stopWatcher.
go func() {
for {
select {
case msg := <-dockerMsgs:
if processErr := c.processEvent(msg, eventsChan); processErr != nil {
log.Errorf("report error while processing docker event: %v", processErr)
}
case err := <-dockerErrs:
log.Errorf("report %v while receiving docker event", err)
if err == io.EOF || err == io.ErrUnexpectedEOF {
return
}
// passive error and re-watch.
errsChan <- err
dockerMsgs, dockerErrs = c.client.Events(ctx, options)
			case <-c.stopWatcher:
				// acknowledge the stop request so Stop() can return, then exit the watcher.
				c.stopWatcher <- nil
				return
			}
}
}()
return nil
}
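// Usage sketch (assumption, not part of this file): a caller owns the channels, starts the
// watcher once, and reacts to start/stop events; the names below are illustrative only.
//
//	events := make(chan ContainerEventMessage)
//	errs := make(chan error)
//	_ = client.Start(events, errs)
//	go func() {
//		for ev := range events {
//			log.Infof("container %s changed state: %s", ev.ContainerID, ev.Action)
//		}
//	}()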
// Stop stop watching events
func (c *dockerClientImpl) Stop() error {
c.stopWatcher <- nil
return <-c.stopWatcher
}
func (c *dockerClientImpl) stateEnumToConst(stateEnum string) (state string, err error) {
switch stateEnum {
case "created":
state = CONTAINER_STATE_CREATED
return
case "running":
state = CONTAINER_STATE_RUNNING
return
case "paused":
state = CONTAINER_STATE_PAUSED
return
case "removing":
state = CONTAINER_STATE_REMOVING
return
case "exited":
state = CONTAINER_STATE_EXITED
return
case "dead":
state = CONTAINER_STATE_DEAD
return
default:
err = fmt.Errorf("unknown container state %s", stateEnum)
return
}
}
// ListContainers for listing docker containers
func (c *dockerClientImpl) ListContainers() ([]*ContainerData, error) {
opts := dockertypes.ContainerListOptions{}
containers, err := c.client.ContainerList(context.Background(), opts)
if err != nil {
return nil, err
}
var (
ctnrs []*ContainerData
)
for _, container := range containers {
containerID := container.ID
containerState, err := c.stateEnumToConst(container.State)
if err != nil {
return nil, err
}
// the other fields doesn't need in this function.
ctnrs = append(ctnrs, &ContainerData{
ID: containerID,
State: containerState,
})
}
return ctnrs, nil
}
// InspectContainer return a docker container info
func (c *dockerClientImpl) InspectContainer(id string) (*ContainerData, error) {
containerJSON, err := c.client.ContainerInspect(context.Background(), id)
if err != nil {
return nil, err
}
containerState, err := c.stateEnumToConst(containerJSON.State.Status)
if err != nil {
return nil, err
}
var (
containerEnvs []*ContainerEnv
)
for _, ce := range containerJSON.Config.Env {
parts := strings.SplitN(ce, "=", 2)
containerEnvs = append(containerEnvs, &ContainerEnv{
Key: parts[0],
Value: parts[1],
})
}
ctnr := &ContainerData{
ID: containerJSON.ID,
Name: containerJSON.Name,
State: containerState,
LogPath: containerJSON.LogPath,
Envs: containerEnvs,
Labels: containerJSON.Config.Labels,
Mounts: func(dockerMounts []dockertypes.MountPoint) []*ContainerMount {
cms := []*ContainerMount{}
for _, dm := range dockerMounts {
cms = append(cms, &ContainerMount{
Name: dm.Name,
Type: string(dm.Type),
Source: dm.Source,
Destination: dm.Destination,
Mode: dm.Mode,
})
}
return cms
}(containerJSON.Mounts),
}
return ctnr, nil
}
| [
"\"DOCKER_API_VERSION\""
]
| []
| [
"DOCKER_API_VERSION"
]
| [] | ["DOCKER_API_VERSION"] | go | 1 | 0 | |
py/tests/test_runbrick.py | import os
import logging
import shutil
import numpy as np
import fitsio
from legacypipe.catalog import read_fits_catalog
from tractor.basics import PointSource
from tractor.galaxy import DevGalaxy
from tractor.sersic import SersicGalaxy
from legacypipe import runbrick as lprunbrick
from legacypipe.survey import wcs_for_brick
from legacysim import setup_logging, LegacySurveySim, find_file, SimCatalog, BrickCatalog, runbrick, utils
from legacysim.batch import EnvironmentManager
logger = logging.getLogger('legacysim.test_runbrick')
setup_logging()
def generate_injected(brickname, zoom=(0,3600,0,3600), zoom_margin=5, mag_range=(19.,20.), shape_r_range=(0.,1.), size=2, seed=42):
brick = BrickCatalog().get_by_name(brickname)
wcs = wcs_for_brick(brick)
(x0,x1,y0,y1) = zoom
W = x1-x0-2*zoom_margin
H = y1-y0-2*zoom_margin
assert (W>0) and (H>0)
targetwcs = wcs.get_subimage(x0+zoom_margin, y0+zoom_margin, W, H)
radecbox = np.ravel([targetwcs.pixelxy2radec(x,y) for x,y in [(1,1),(W,H)]],order='F')
radecbox = np.concatenate([np.sort(radecbox[:2]),np.sort(radecbox[2:])])
injected = SimCatalog(size=size)
rng = np.random.RandomState(seed=seed)
injected.ra,injected.dec = utils.sample_ra_dec(radecbox=radecbox,size=injected.size,rng=rng)
injected.bx,injected.by = brick.get_xy_from_radec(injected.ra,injected.dec)
flux_range = utils.mag2nano(mag_range)
for b in ['g','r','z']:
injected.set('flux_%s' % b,rng.uniform(*flux_range,size=injected.size))
injected.sersic = injected.full(4)
ba = rng.uniform(0.2,1.,size=injected.size)
phi = rng.uniform(0,np.pi,size=injected.size)
injected.shape_e1,injected.shape_e2 = utils.get_shape_e1_e2(ba,phi)
injected.shape_r = rng.uniform(*shape_r_range,size=injected.size)
injected.brickname = injected.full(brickname)
return injected
def test_eq_legacypipe():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dir = 'out-testcase3-legacysim'
legacypipe_dir = 'out-testcase3-legacypipe'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
lprunbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--outdir', legacypipe_dir,
'--force-all',
'--threads', '1'])
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--outdir', output_dir,
'--force-all',
'--threads', 1])
legacypipe_fn = find_file(base_dir=legacypipe_dir,filetype='tractor',source='legacypipe',brickname=brickname)
tractor_legacypipe = SimCatalog(legacypipe_fn)
legacysim_fn = find_file(base_dir=output_dir,filetype='tractor',source='legacysim',brickname=brickname)
tractor_legacysim = SimCatalog(legacysim_fn)
assert tractor_legacysim == tractor_legacypipe
# check header
header_legacypipe = fitsio.read_header(legacypipe_fn)
header_legacysim = fitsio.read_header(legacysim_fn)
header_injected = fitsio.read_header(find_file(base_dir=output_dir,filetype='injected',brickname=brickname))
#assert len(header_legacysim) == len(header_injected)
for key in header_injected:
if key != 'PRODTYPE':
assert header_legacysim[key] == header_injected[key]
assert 'LEGSIMV' in header_legacysim
assert 'galsim' in [header_legacysim[key] for key in header_legacysim]
stages = [val for key,val in EnvironmentManager._shorts_stage.items() if key != 'wise_forced']
for stage in stages:
assert ('LSV_%s' % stage) in header_legacysim
# legacysim: version + comment (2), galsim (2) and OBV
assert len(header_legacysim) == len(header_legacypipe) + 2 + 2 + len(stages)
def test_simblobs():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dir = 'out-testcase3-legacysim'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
injected_fn = os.path.join(output_dir,'input_injected.fits')
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
injected = generate_injected(brickname,zoom=[1020,1070,2785,2815],mag_range=[19.,20.],shape_r_range=[0.,0.])
injected.writeto(injected_fn)
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--injected-fn', injected_fn,
'--outdir', output_dir,
'--threads',1])
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--injected-fn', injected_fn,
'--outdir', output_dir,
'--fileid', 1,
'--sim-blobs',
'--threads', 1])
tractor_simblobs = SimCatalog(find_file(base_dir=output_dir,filetype='tractor',source='legacysim',brickname=brickname,fileid=1))
indin = injected.match_radec(tractor_simblobs,radius_in_degree=0.05/3600.,nearest=True)[0]
assert indin.size == injected.size
tractor_all = SimCatalog(find_file(base_dir=output_dir,filetype='tractor',source='legacysim',brickname=brickname))
indin = tractor_all.match_radec(tractor_simblobs,radius_in_degree=0.001/3600.,nearest=True,return_distance=True)[0]
assert indin.size == tractor_simblobs.size
def test_case3():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dir = 'out-testcase3-legacysim'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
checkpoint_fn = os.path.join(output_dir, 'checkpoint.pickle')
if os.path.exists(checkpoint_fn):
os.unlink(checkpoint_fn)
injected_fn = os.path.join(output_dir,'input_injected.fits')
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
injected = generate_injected(brickname,zoom=[1020,1070,2785,2815],mag_range=[19.,20.],shape_r_range=[0.,0.])
injected.writeto(injected_fn)
for extra_args in [
['--plots','--plot-base',os.path.join(output_dir,'brick-%(brick)s')],
['--sim-stamp','tractor'],['--sim-stamp','galsim'],
['--sim-stamp','tractor','--add-sim-noise','gaussian'],
['--sim-stamp','tractor','--add-sim-noise','poisson'],
['--sim-stamp','galsim','--add-sim-noise','gaussian'],
['--sim-stamp','galsim','--add-sim-noise','poisson'],
['--sim-stamp','galsim','--add-sim-noise','gaussian','--nobj',0],
['--sim-stamp','galsim','--add-sim-noise','gaussian','--nobj',1],
['--sim-stamp','galsim','--add-sim-noise','gaussian','--rowstart',1,'--nobj',1],
['--sim-stamp','tractor','--col-radius',3600.]
]:
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--injected-fn', injected_fn,
'--outdir', output_dir,
'--seed', 0,
'--threads', 1] + extra_args)
# build-up truth
origin_ra = [244.77973,244.77828]
origin_dec = [12.07234,12.07250]
origin_type = [(DevGalaxy,SersicGalaxy),(PointSource,)]
injected = SimCatalog(injected_fn)
rowstart,nobj = 0,len(injected)
if '--rowstart' in extra_args: rowstart = extra_args[extra_args.index('--rowstart')+1]
if '--nobj' in extra_args: nobj = extra_args[extra_args.index('--nobj')+1]
injected = injected[rowstart:rowstart+nobj]
col_radius = 5.
if '--col-radius' in extra_args: col_radius = extra_args[extra_args.index('--col-radius')+1]
collided = injected.mask_collisions(radius_in_degree=col_radius/3600.)
injected = injected[~collided]
ra,dec = np.concatenate([origin_ra,injected.ra]),np.concatenate([origin_dec,injected.dec])
nsigmas = 50 # max tolerance
survey = LegacySurveySim(output_dir=output_dir,kwargs_simid={'rowstart':rowstart})
fn = survey.find_file('tractor',brick=brickname,output=True)
logger.info('Reading %s',fn)
tractor = SimCatalog(fn)
        # check the total number of detected sources
assert len(tractor) == len(origin_ra) + len(injected), 'Found %d objects, injected %d sources' % (len(tractor),len(origin_ra) + len(injected))
# first match ra,dec
indin,indout,distance = utils.match_radec(ra,dec,tractor.ra,tractor.dec,radius_in_degree=0.08/3600.,nearest=True,return_distance=True)
assert len(indin) == len(tractor), 'Matched %d objects among %d sources' % (len(indin),len(tractor)) # all matches
indout = indout[np.argsort(indin)]
tractor_all = tractor[indout] # reorder such that len(origin_ra): are injected sources
# ra,dec tolerance
sigma = np.sqrt(((tractor_all.ra-ra)**2*tractor_all.ra_ivar + (tractor_all.dec-dec)**2*tractor_all.dec_ivar)/2.)
logger.info('Max angular distance is %.4f arcsec, %.4f sigmas',distance.max()*3600.,sigma.max())
assert np.all(sigma < nsigmas)
# flux tolerance
tractor = tractor_all[len(origin_ra):]
if tractor.size:
for b in ['g','r','z']:
diff = np.abs(tractor.get('flux_%s' % b) - injected.get('flux_%s' % b))
sigma = diff*np.sqrt(tractor.get('flux_ivar_%s' % b))
logger.info('Max flux diff in %s band is %.4f, %.4f sigmas',b,diff.max(),sigma.max())
assert np.all(sigma < nsigmas)
cat = read_fits_catalog(tractor_all)
logger.info('Read catalog: %s',cat)
assert len(cat) == len(tractor_all)
# check origin sources are of the correct type
for isrc,src in enumerate(cat[:len(origin_ra)]):
assert type(src) in origin_type[isrc]
# check injected sources are of the correct type
for isrc,src in enumerate(cat[len(origin_ra):]):
assert type(src) is PointSource
def test_case3_shape():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dir = 'out-testcase3-legacysim-shape'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
checkpoint_fn = os.path.join(output_dir, 'checkpoint.pickle')
if os.path.exists(checkpoint_fn):
os.unlink(checkpoint_fn)
injected_fn = os.path.join(output_dir,'input_injected.fits')
log_fn = os.path.join(output_dir,'log.out')
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
injected = generate_injected(brickname,zoom=[1020,1040,2785,2815],zoom_margin=5,mag_range=[19.,20.],size=1)
injected.shape_r = injected.full(2.)
injected.writeto(injected_fn)
for extra_args in [['--plots','--plot-base',os.path.join(output_dir,'brick-%(brick)s')],
['--sim-stamp','tractor'],['--sim-stamp','galsim'],
['--sim-stamp','tractor','--add-sim-noise','gaussian'],
['--sim-stamp','galsim','--add-sim-noise','poisson']
]:
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--injected-fn', injected_fn,
'--outdir', output_dir,
'--seed', 42,
'--threads', 2,
'--verbose', '--write-log', log_fn] + extra_args)
setup_logging(logging.INFO)
# input injected
injected = SimCatalog(injected_fn)
col_radius = 5.
        if '--col-radius' in extra_args: col_radius = extra_args[extra_args.index('--col-radius')+1]
collided = injected.mask_collisions(radius_in_degree=col_radius/3600.)
injected = injected[~collided]
# build-up truth
origin_ra = [244.77973,244.77828]
origin_dec = [12.07234,12.07250]
origin_type = [(DevGalaxy,SersicGalaxy),(PointSource,)]
ra,dec = np.concatenate([origin_ra,injected.ra]),np.concatenate([origin_dec,injected.dec])
nsigmas = 80 # max tolerance
survey = LegacySurveySim(output_dir=output_dir)
fn = survey.find_file('tractor',brick=brickname,output=True)
logger.info('Reading %s',fn)
tractor = SimCatalog(fn)
assert len(tractor) == len(origin_ra) + len(injected), 'Found %d objects, injected %d sources' % (len(tractor),len(origin_ra) + len(injected))
# first match ra,dec
indin,indout,distance = utils.match_radec(ra,dec,tractor.ra,tractor.dec,radius_in_degree=0.05/3600.,nearest=True,return_distance=True)
assert len(indin) == len(tractor), 'Matched %d objects among %d sources' % (len(indin),len(tractor)) # all matches
indout = indout[np.argsort(indin)]
tractor_all = tractor[indout] # reorder such that len(origin_ra): are injected sources
# ra,dec tolerance
sigma = np.sqrt(((tractor_all.ra-ra)**2*tractor_all.ra_ivar + (tractor_all.dec-dec)**2*tractor_all.dec_ivar)/2.)
logger.info('Max angular distance is %.4f arcsec, %.4f sigmas',distance.max()*3600.,sigma.max())
assert np.all(sigma < nsigmas)
# flux tolerance
tractor = tractor_all[len(origin_ra):]
for b in ['g','r','z']:
diff = np.abs(tractor.get('flux_%s' % b) - injected.get('flux_%s' % b))
sigma = diff*np.sqrt(tractor.get('flux_ivar_%s' % b))
logger.info('Max flux diff in %s band is %.4f, %.4f sigmas',b,diff.max(),sigma.max())
assert np.all(sigma < nsigmas)
for field in ['shape_e1','shape_e2','shape_r']:
diff = np.abs(tractor.get(field) - injected.get(field))
sigma = diff*np.sqrt(tractor.get('%s_ivar' % field))
logger.info('Max %s diff is %.4f, %.4f sigmas',field,diff.max(),sigma.max())
assert np.all(sigma < nsigmas)
cat = read_fits_catalog(tractor_all)
logger.info('Read catalog: %s',cat)
assert len(cat) == len(tractor_all)
for isrc,src in enumerate(cat[:len(origin_ra)]):
assert type(src) in origin_type[isrc]
# check injected sources are of the correct type
for isrc,src in enumerate(cat[len(origin_ra):]):
assert type(src) is DevGalaxy or type(src) is SersicGalaxy
def test_mzlsbass2():
survey_dir = os.path.join(os.path.dirname(__file__), 'mzlsbass2')
output_dir = 'out-mzlsbass2-legacysim'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
injected_fn = os.path.join(output_dir,'input_injected.fits')
log_fn = os.path.join(output_dir,'log.out')
brickname = '1773p595'
zoom = [1300,1500,700,900]
#injected = generate_injected(brickname,zoom=zoom,zoom_margin=10)
injected = generate_injected(brickname,zoom=[1300,1400,700,800],zoom_margin=10)
injected.writeto(injected_fn)
for extra_args in [['--plots','--plot-base',os.path.join(output_dir,'brick-%(brick)s')],
['--sim-stamp','tractor','--add-sim-noise','gaussian'],
['--sim-stamp','galsim','--add-sim-noise','poisson']
]:
runbrick.main(args=['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--force-all', '--no-write',
'--survey-dir', survey_dir,
'--injected-fn', injected_fn,
'--outdir', output_dir,
'--sim-blobs',
'--seed', 42,
'--verbose','--write-log', log_fn] + extra_args)
setup_logging(logging.INFO)
# input injected
injected = SimCatalog(injected_fn)
col_radius = 5.
        if '--col-radius' in extra_args: col_radius = extra_args[extra_args.index('--col-radius')+1]
collided = injected.mask_collisions(radius_in_degree=col_radius/3600.)
injected = injected[~collided]
nsigmas = 30 # max tolerance
survey = LegacySurveySim(output_dir=output_dir)
fn = survey.find_file('tractor',brick=brickname,output=True)
logger.info('Reading %s',fn)
tractor = SimCatalog(fn)
# first match ra,dec
indin,indout,distance = injected.match_radec(tractor,radius_in_degree=0.1/3600.,nearest=True,return_distance=True)
assert len(indin) == len(injected), 'Matched %d objects among %d injected sources' % (len(indin),len(injected))
indout = indout[np.argsort(indin)]
tractor = tractor[indout] # reorder such that len(origin_ra): are injected sources
# ra,dec tolerance
sigma = np.sqrt(((tractor.ra-injected.ra)**2*tractor.ra_ivar + (tractor.dec-injected.dec)**2*tractor.dec_ivar)/2.)
logger.info('Max angular distance is %.4f arcsec, %.4f sigmas',distance.max()*3600.,sigma.max())
assert np.all(sigma < nsigmas)
# flux tolerance
for b in ['g','r','z']:
diff = np.abs(tractor.get('flux_%s' % b) - injected.get('flux_%s' % b))
sigma = diff*np.sqrt(tractor.get('flux_ivar_%s' % b))
logger.info('Max flux diff in %s band is %.4f, %.4f sigmas',b,diff.max(),sigma.max())
assert np.all(sigma < nsigmas)
def test_rerun():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dirs = ['out-testcase3-legacysim-rerun-%d' % i for i in range(1,3)]
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
for output_dir in output_dirs:
checkpoint_fn = os.path.join(output_dir,'checkpoint.pickle')
if os.path.exists(checkpoint_fn):
os.unlink(checkpoint_fn)
injected_fn = os.path.join(output_dirs[0],'input_injected.fits')
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
injected = generate_injected(brickname,zoom=[1020,1070,2785,2815],mag_range=[19.,20.],shape_r_range=[0.,0.],size=2)
injected.writeto(injected_fn)
for extra_args in [['--sim-stamp','tractor','--add-sim-noise','gaussian']
]:
common_args = ['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise',
'--survey-dir', survey_dir,
'--seed', 42,
'--threads', 1] + extra_args
runbrick.main(args=common_args + ['--injected-fn',injected_fn, '--force-all', '--no-write','--outdir', output_dirs[0]])
fn = find_file(base_dir=output_dirs[0],filetype='tractor',brickname=brickname,source='legacysim')
tractor_ref = SimCatalog(fn)
for istages,stages in enumerate([['outliers','writecat'],['refs','fitblobs','writecat']]):
shutil.rmtree(output_dirs[1],ignore_errors=True)
for istage,stage in enumerate(stages):
args = common_args + ['--write-stage',stage,'--stage',stage,'--outdir',output_dirs[1]]
if istages == 0 or istage == 0:
args += ['--injected-fn',injected_fn]
assert '--force-all' not in args
runbrick.main(args=args)
fn = find_file(base_dir=output_dirs[1],filetype='tractor',brickname=brickname,source='legacysim')
tractor = SimCatalog(fn)
assert tractor == tractor_ref
def test_skipid():
survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')
output_dir = 'out-testcase3-legacysim-skipid'
os.environ['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')
os.environ['GAIA_CAT_VER'] = '2'
checkpoint_fn = os.path.join(output_dir,'checkpoint.pickle')
if os.path.exists(checkpoint_fn):
os.unlink(checkpoint_fn)
injected_fn = os.path.join(output_dir,'input_injected.fits')
brickname = '2447p120'
zoom = [1020,1070,2775,2815]
injected = generate_injected(brickname,zoom=[1020,1070,2785,2815],mag_range=[19.,20.],shape_r_range=[0.,0.],size=2)
rng = np.random.RandomState(seed=42)
injected.seed = rng.randint(int(2**32 - 1),size=injected.size)
injected.writeto(injected_fn)
for extra_args in [['--col-radius', 3600],
['--col-radius', -1],
]:
common_args = ['--brick', brickname, '--zoom', *map(str,zoom),
'--no-wise', '--no-write',
'--survey-dir', survey_dir,
'--outdir', output_dir,
'--col-radius', 3600,
'--threads', 1] + extra_args
runbrick.main(args=common_args + ['--force-all', '--injected-fn', injected_fn])
fn = find_file(base_dir=output_dir,filetype='injected',brickname=brickname,source='legacysim')
injected_skip0 = SimCatalog(fn)
assert np.all(injected_skip0.seed == injected.seed)
        if '--col-radius' in extra_args and extra_args[extra_args.index('--col-radius')+1] > 3000:
assert (injected_skip0.collided.sum() > 0) and (injected_skip0.collided.sum() < injected_skip0.size)
runbrick.main(args=common_args + ['--skipid',1])
fn = find_file(base_dir=output_dir,filetype='injected',brickname=brickname,source='legacysim',skipid=1)
injected_skip1 = SimCatalog(fn)
for field in ['ra','dec']:
assert np.all(injected_skip1.get(field) == injected_skip0.get(field)[injected_skip0.collided])
if __name__ == '__main__':
test_eq_legacypipe()
test_simblobs()
test_case3()
test_case3_shape()
test_mzlsbass2()
test_rerun()
test_skipid()
| []
| []
| [
"GAIA_CAT_DIR",
"GAIA_CAT_VER"
]
| [] | ["GAIA_CAT_DIR", "GAIA_CAT_VER"] | python | 2 | 0 | |
app/apps.py | from django.apps import AppConfig
import os
class AppConfig(AppConfig):
name = 'app'
def ready(self):
# At cold start Django executes ready method twice, so we use some internal Django environment
# variables to only run it once
if os.environ.get('RUN_MAIN', None) != 'true':
return
# Starts the websocket server ONCE!
from app.websocket_server import start_server
start_server()
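# Note on the RUN_MAIN guard above (assumption based on Django's autoreload behaviour):
# RUN_MAIN is set to 'true' only in the reloader's child process under `manage.py runserver`,
# so the websocket server is started exactly once there. With `--noreload` or a production
# WSGI server the variable is unset and ready() returns before starting the server.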
| []
| []
| [
"RUN_MAIN"
]
| [] | ["RUN_MAIN"] | python | 1 | 0 |