filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
vendor/github.com/nakagami/firebirdsql/wireprotocol.go
|
/*******************************************************************************
The MIT License (MIT)
Copyright (c) 2013-2019 Hajime Nakagami
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************/
package firebirdsql
import (
"bufio"
"bytes"
"container/list"
"crypto/rc4"
"database/sql/driver"
"encoding/hex"
"errors"
"fmt"
"math/big"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/kardianos/osext"
"gitlab.com/nyarla/go-crypt"
//"unsafe"
)
const (
PLUGIN_LIST = "Srp256,Srp,Legacy_Auth"
BUFFER_LEN = 1024
MAX_CHAR_LENGTH = 32767
BLOB_SEGMENT_SIZE = 32000
)
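// _INFO_SQL_SELECT_DESCRIBE_VARS lists the isc_info_sql_* items requested
// when asking the server to describe the output columns of a statement.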
func _INFO_SQL_SELECT_DESCRIBE_VARS() []byte {
return []byte{
isc_info_sql_select,
isc_info_sql_describe_vars,
isc_info_sql_sqlda_seq,
isc_info_sql_type,
isc_info_sql_sub_type,
isc_info_sql_scale,
isc_info_sql_length,
isc_info_sql_null_ind,
isc_info_sql_field,
isc_info_sql_relation,
isc_info_sql_owner,
isc_info_sql_alias,
isc_info_sql_describe_end,
}
}
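// wireChannel wraps the TCP connection with buffered I/O and, once Arc4
// wire encryption has been negotiated, RC4 stream ciphers in each direction.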
type wireChannel struct {
conn net.Conn
reader *bufio.Reader
writer *bufio.Writer
rc4reader *rc4.Cipher
rc4writer *rc4.Cipher
}
func newWireChannel(conn net.Conn) (wireChannel, error) {
	c := wireChannel{
		conn:   conn,
		reader: bufio.NewReader(conn),
		writer: bufio.NewWriter(conn),
	}
	return c, nil
}
func (c *wireChannel) setAuthKey(key []byte) (err error) {
	// Check the first cipher's error instead of silently overwriting it.
	if c.rc4reader, err = rc4.NewCipher(key); err != nil {
		return
	}
	c.rc4writer, err = rc4.NewCipher(key)
	return
}
func (c *wireChannel) Read(buf []byte) (n int, err error) {
if c.rc4reader != nil {
src := make([]byte, len(buf))
n, err = c.reader.Read(src)
c.rc4reader.XORKeyStream(buf, src[0:n])
return
}
return c.reader.Read(buf)
}
func (c *wireChannel) Write(buf []byte) (n int, err error) {
if c.rc4writer != nil {
dst := make([]byte, len(buf))
c.rc4writer.XORKeyStream(dst, buf)
written := 0
for written < len(buf) {
n, err = c.writer.Write(dst[written:])
if err != nil {
return
}
written += n
}
n = written
} else {
n, err = c.writer.Write(buf)
}
return
}
func (c *wireChannel) Flush() error {
return c.writer.Flush()
}
func (c *wireChannel) Close() error {
return c.conn.Close()
}
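// wireProtocol holds the state of one attachment: the outgoing packet
// buffer, the negotiated protocol version, and authentication data.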
type wireProtocol struct {
buf []byte
conn wireChannel
dbHandle int32
addr string
protocolVersion int32
acceptArchitecture int32
acceptType int32
lazyResponseCount int
pluginName string
user string
password string
authData []byte
// Time Zone
timezone string
tzNameById map[int]string
tzIdByName map[string]int
}
func newWireProtocol(addr string, timezone string) (*wireProtocol, error) {
p := new(wireProtocol)
p.buf = make([]byte, 0, BUFFER_LEN)
p.addr = addr
conn, err := net.Dial("tcp", p.addr)
if err != nil {
return nil, err
}
p.conn, err = newWireChannel(conn)
p.timezone = timezone
return p, err
}
func (p *wireProtocol) packInt(i int32) {
// pack big endian int32
p.buf = append(p.buf, []byte{byte(i >> 24 & 0xFF), byte(i >> 16 & 0xFF), byte(i >> 8 & 0xFF), byte(i & 0xFF)}...)
}
func (p *wireProtocol) packBytes(b []byte) {
p.buf = append(p.buf, xdrBytes(b)...)
}
func (p *wireProtocol) packString(s string) {
p.buf = append(p.buf, xdrBytes([]byte(s))...)
}
func (p *wireProtocol) appendBytes(bs []byte) {
p.buf = append(p.buf, bs...)
}
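// getSrpClientPublicBytes hex-encodes the client's SRP public key and splits
// it across CNCT_specific_data chunks of at most 254 bytes, each prefixed
// with a length byte and a sequence number.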
func getSrpClientPublicBytes(clientPublic *big.Int) (bs []byte) {
b := bytes.NewBufferString(hex.EncodeToString(bigToBytes(clientPublic))).Bytes()
if len(b) > 254 {
bs = bytes.Join([][]byte{
[]byte{CNCT_specific_data, byte(255), 0}, b[:254],
[]byte{CNCT_specific_data, byte(len(b)-254) + 1, 1}, b[254:],
}, nil)
} else {
bs = bytes.Join([][]byte{
[]byte{CNCT_specific_data, byte(len(b)) + 1, 0}, b,
}, nil)
}
return bs
}
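// uid builds the CNCT_* user identification block sent with op_connect:
// login, plugin name and list, plugin-specific data, wire crypt preference,
// and the client's OS user and host name.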
func (p *wireProtocol) uid(user string, password string, authPluginName string, wireCrypt bool, clientPublic *big.Int) []byte {
sysUser := os.Getenv("USER")
if sysUser == "" {
sysUser = os.Getenv("USERNAME")
}
hostname, _ := os.Hostname()
sysUserBytes := bytes.NewBufferString(sysUser).Bytes()
hostnameBytes := bytes.NewBufferString(hostname).Bytes()
pluginListNameBytes := bytes.NewBufferString(PLUGIN_LIST).Bytes()
pluginNameBytes := bytes.NewBufferString(authPluginName).Bytes()
userBytes := bytes.NewBufferString(strings.ToUpper(user)).Bytes()
var wireCryptByte byte
if wireCrypt {
wireCryptByte = 1
} else {
wireCryptByte = 0
}
var specific_data []byte
if authPluginName == "Srp" || authPluginName == "Srp256" {
specific_data = getSrpClientPublicBytes(clientPublic)
} else if authPluginName == "Legacy_Auth" {
b := bytes.NewBufferString(crypt.Crypt(password, "9z")[2:]).Bytes()
specific_data = bytes.Join([][]byte{
[]byte{CNCT_specific_data, byte(len(b)) + 1, 0}, b,
}, nil)
} else {
panic(fmt.Sprintf("Unknown plugin name:%s", authPluginName))
}
return bytes.Join([][]byte{
[]byte{CNCT_login, byte(len(userBytes))}, userBytes,
[]byte{CNCT_plugin_name, byte(len(pluginNameBytes))}, pluginNameBytes,
[]byte{CNCT_plugin_list, byte(len(pluginListNameBytes))}, pluginListNameBytes,
specific_data,
[]byte{CNCT_client_crypt, 4, wireCryptByte, 0, 0, 0},
[]byte{CNCT_user, byte(len(sysUserBytes))}, sysUserBytes,
[]byte{CNCT_host, byte(len(hostnameBytes))}, hostnameBytes,
[]byte{CNCT_user_verification, 0},
}, nil)
}
func (p *wireProtocol) sendPackets() (written int, err error) {
p.debugPrint("\tsendPackets():%v", p.buf)
n := 0
for written < len(p.buf) {
n, err = p.conn.Write(p.buf[written:])
if err != nil {
break
}
written += n
}
	if ferr := p.conn.Flush(); err == nil {
		err = ferr
	}
p.buf = make([]byte, 0, BUFFER_LEN)
return
}
func (p *wireProtocol) suspendBuffer() []byte {
p.debugPrint("\tsuspendBuffer():%v", p.buf)
buf := p.buf
p.buf = make([]byte, 0, BUFFER_LEN)
return buf
}
func (p *wireProtocol) resumeBuffer(buf []byte) {
p.debugPrint("\tresumeBuffer():%v", buf)
p.buf = buf
}
func (p *wireProtocol) recvPackets(n int) ([]byte, error) {
buf := make([]byte, n)
var err error
read := 0
totalRead := 0
for totalRead < n {
read, err = p.conn.Read(buf[totalRead:n])
if err != nil {
p.debugPrint("\trecvPackets():%v:%v", buf, err)
return buf, err
}
totalRead += read
}
p.debugPrint("\trecvPackets():%v:%v", buf, err)
return buf, err
}
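// recvPacketsAlignment reads n bytes plus the XDR padding needed to reach a
// four-byte boundary, and returns only the n-byte payload.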
func (p *wireProtocol) recvPacketsAlignment(n int) ([]byte, error) {
padding := n % 4
if padding > 0 {
padding = 4 - padding
}
buf, err := p.recvPackets(n + padding)
return buf[0:n], err
}
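// _parse_status_vector reads a Firebird status vector: a sequence of
// (type, value) clusters terminated by isc_arg_end, from which it collects
// the gds error codes, the SQL code, and a formatted error message.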
func (p *wireProtocol) _parse_status_vector() (*list.List, int, string, error) {
sql_code := 0
gds_code := 0
gds_codes := list.New()
num_arg := 0
message := ""
b, err := p.recvPackets(4)
n := bytes_to_bint32(b)
for n != isc_arg_end {
switch {
case n == isc_arg_gds:
b, err = p.recvPackets(4)
gds_code = int(bytes_to_bint32(b)) // assign the outer variable; := here shadowed it and broke the sql_code check below
if gds_code != 0 {
gds_codes.PushBack(gds_code)
message += errmsgs[gds_code]
num_arg = 0
}
case n == isc_arg_number:
b, err = p.recvPackets(4)
num := int(bytes_to_bint32(b))
if gds_code == 335544436 { // isc_sqlerr: the following number argument is the SQL code
sql_code = num
}
num_arg += 1
message = strings.Replace(message, "@"+strconv.Itoa(num_arg), strconv.Itoa(num), 1)
case n == isc_arg_string:
b, err = p.recvPackets(4)
nbytes := int(bytes_to_bint32(b))
b, err = p.recvPacketsAlignment(nbytes)
s := bytes_to_str(b)
num_arg += 1
message = strings.Replace(message, "@"+strconv.Itoa(num_arg), s, 1)
case n == isc_arg_interpreted:
b, err = p.recvPackets(4)
nbytes := int(bytes_to_bint32(b))
b, err = p.recvPacketsAlignment(nbytes)
s := bytes_to_str(b)
message += s
case n == isc_arg_sql_state:
b, err = p.recvPackets(4)
nbytes := int(bytes_to_bint32(b))
b, err = p.recvPacketsAlignment(nbytes)
_ = bytes_to_str(b) // skip status code
}
b, err = p.recvPackets(4)
n = bytes_to_bint32(b)
}
return gds_codes, sql_code, message, err
}
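// _parse_op_response reads the body of a generic op_response packet: object
// handle, 8-byte object id, data buffer, and a trailing status vector.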
func (p *wireProtocol) _parse_op_response() (int32, []byte, []byte, error) {
b, err := p.recvPackets(16)
h := bytes_to_bint32(b[0:4]) // Object handle
oid := b[4:12] // Object ID
buf_len := int(bytes_to_bint32(b[12:])) // buffer length
buf, err := p.recvPacketsAlignment(buf_len)
gds_code_list, sql_code, message, err := p._parse_status_vector()
if gds_code_list.Len() > 0 || sql_code != 0 {
err = errors.New(message)
}
return h, oid, buf, err
}
func (p *wireProtocol) _parse_connect_response(user string, password string, options map[string]string, clientPublic *big.Int, clientSecret *big.Int) (err error) {
p.debugPrint("_parse_connect_response")
	wire_crypt := true
	if v, err := strconv.ParseBool(options["wire_crypt"]); err == nil {
		wire_crypt = v // only override the default when the option actually parses
	}
b, err := p.recvPackets(4)
opcode := bytes_to_bint32(b)
for opcode == op_dummy {
b, _ = p.recvPackets(4)
opcode = bytes_to_bint32(b)
}
if opcode == op_reject {
err = errors.New("_parse_connect_response() op_reject")
return
}
if opcode == op_response {
_, _, _, err = p._parse_op_response() // an error occurred
return
}
b, _ = p.recvPackets(12)
p.protocolVersion = int32(b[3])
p.acceptArchitecture = bytes_to_bint32(b[4:8])
p.acceptType = bytes_to_bint32(b[8:12])
if opcode == op_cond_accept || opcode == op_accept_data {
var ln int
b, _ := p.recvPackets(4)
ln = int(bytes_to_bint32(b))
data, _ := p.recvPacketsAlignment(ln)
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
pluginName, _ := p.recvPacketsAlignment(ln)
p.pluginName = bytes_to_str(pluginName)
b, _ = p.recvPackets(4)
isAuthenticated := bytes_to_bint32(b)
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
_, _ = p.recvPacketsAlignment(ln) // keys
var authData []byte
var sessionKey []byte
if isAuthenticated == 0 {
if p.pluginName == "Srp" || p.pluginName == "Srp256" {
// TODO: normalize user
if len(data) == 0 {
p.opContAuth(bigToBytes(clientPublic), p.pluginName, PLUGIN_LIST, "")
b, _ := p.recvPackets(4)
if DEBUG_SRP && bytes_to_bint32(b) == op_cont_auth {
panic("auth error")
}
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
data, _ = p.recvPacketsAlignment(ln)
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
_, _ = p.recvPacketsAlignment(ln) // pluginName
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
_, _ = p.recvPacketsAlignment(ln) // pluginList
b, _ = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
_, _ = p.recvPacketsAlignment(ln) // keys
}
ln = int(bytes_to_int16(data[:2]))
serverSalt := data[2 : ln+2]
serverPublic := bigFromHexString(bytes_to_str(data[4+ln:]))
authData, sessionKey = getClientProof(strings.ToUpper(user), password, serverSalt, clientPublic, serverPublic, clientSecret, p.pluginName)
if DEBUG_SRP {
fmt.Printf("pluginName=%s\nserverSalt=%s\nserverPublic(bin)=%s\nserverPublic=%s\nauthData=%v,sessionKey=%v\n",
p.pluginName, serverSalt, data[4+ln:], serverPublic, authData, sessionKey)
}
} else if p.pluginName == "Legacy_Auth" {
authData = bytes.NewBufferString(crypt.Crypt(password, "9z")[2:]).Bytes()
} else {
err = errors.New("_parse_connect_response() Unauthorized")
return
}
}
if opcode == op_cond_accept {
p.opContAuth(authData, options["auth_plugin_name"], PLUGIN_LIST, "")
_, _, _, err = p.opResponse()
if err != nil {
return
}
}
if wire_crypt && sessionKey != nil {
// Send op_crypt
p.packInt(op_crypt)
p.packString("Arc4")
p.packString("Symmetric")
p.sendPackets()
p.conn.setAuthKey(sessionKey)
_, _, _, err = p.opResponse()
if err != nil {
return
}
} else {
p.authData = authData // use later opAttach and opCreate
}
} else {
if opcode != op_accept {
err = errors.New("_parse_connect_response() protocol error")
return
}
}
return
}
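// _parse_select_items walks a buffer of (item, length, value) clusters and
// fills in the column descriptions of xsqlda; it returns the next column
// index when the reply was truncated, or -1 when all items were consumed.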
func (p *wireProtocol) _parse_select_items(buf []byte, xsqlda []xSQLVAR) (int, error) {
var err error
var ln int
index := 0
i := 0
for item := int(buf[i]); item != isc_info_end; item = int(buf[i]) {
i++
switch item {
case isc_info_sql_sqlda_seq:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
index = int(bytes_to_int32(buf[i : i+ln]))
i += ln
case isc_info_sql_type:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
sqltype := int(bytes_to_int32(buf[i : i+ln]))
if sqltype%2 != 0 {
sqltype--
}
xsqlda[index-1].sqltype = sqltype
i += ln
case isc_info_sql_sub_type:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].sqlsubtype = int(bytes_to_int32(buf[i : i+ln]))
i += ln
case isc_info_sql_scale:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].sqlscale = int(bytes_to_int32(buf[i : i+ln]))
i += ln
case isc_info_sql_length:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].sqllen = int(bytes_to_int32(buf[i : i+ln]))
i += ln
case isc_info_sql_null_ind:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].null_ok = bytes_to_int32(buf[i:i+ln]) != 0
i += ln
case isc_info_sql_field:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].fieldname = bytes_to_str(buf[i : i+ln])
i += ln
case isc_info_sql_relation:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].relname = bytes_to_str(buf[i : i+ln])
i += ln
case isc_info_sql_owner:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].ownname = bytes_to_str(buf[i : i+ln])
i += ln
case isc_info_sql_alias:
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
xsqlda[index-1].aliasname = bytes_to_str(buf[i : i+ln])
i += ln
case isc_info_truncated:
return index, err // return next index
case isc_info_sql_describe_end:
/* NOTHING */
		default:
			// An unknown item would otherwise loop forever here, since i is not advanced.
			return -1, fmt.Errorf("invalid item [%02x]! i=%d", buf[i], i)
}
}
return -1, err // no more info
}
func (p *wireProtocol) parse_xsqlda(buf []byte, stmtHandle int32) (int32, []xSQLVAR, error) {
var ln, col_len, next_index int
var err error
var stmt_type int32
var xsqlda []xSQLVAR
i := 0
for i < len(buf) {
if buf[i] == byte(isc_info_sql_stmt_type) && buf[i+1] == byte(0x04) && buf[i+2] == byte(0x00) {
i += 1
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
stmt_type = int32(bytes_to_int32(buf[i : i+ln]))
i += ln
} else if buf[i] == byte(isc_info_sql_select) && buf[i+1] == byte(isc_info_sql_describe_vars) {
i += 2
ln = int(bytes_to_int16(buf[i : i+2]))
i += 2
col_len = int(bytes_to_int32(buf[i : i+ln]))
xsqlda = make([]xSQLVAR, col_len)
next_index, err = p._parse_select_items(buf[i+ln:], xsqlda)
for next_index > 0 { // more describe vars
p.opInfoSql(stmtHandle,
bytes.Join([][]byte{
[]byte{isc_info_sql_sqlda_start, 2},
int16_to_bytes(int16(next_index)),
_INFO_SQL_SELECT_DESCRIBE_VARS(),
}, nil))
_, _, buf, err = p.opResponse()
// buf[:2] == []byte{0x04,0x07}
ln = int(bytes_to_int16(buf[2:4]))
// bytes_to_int(buf[4:4+l]) == col_len
next_index, err = p._parse_select_items(buf[4+ln:], xsqlda)
}
} else {
break
}
}
for i := range xsqlda {
xsqlda[i].wp = p
}
return stmt_type, xsqlda, err
}
func (p *wireProtocol) getBlobSegments(blobId []byte, transHandle int32) ([]byte, error) {
suspendBuf := p.suspendBuffer()
blob := []byte{}
p.opOpenBlob(blobId, transHandle)
blobHandle, _, _, err := p.opResponse()
if err != nil {
p.resumeBuffer(suspendBuf)
return nil, err
}
var rbuf []byte
	more_data := int32(1)
for more_data != 2 {
p.opGetSegment(blobHandle)
		more_data, _, rbuf, err = p.opResponse()
		if err != nil {
			break
		}
buf := rbuf
for len(buf) > 0 {
ln := int(bytes_to_int16(buf[0:2]))
blob = append(blob, buf[2:ln+2]...)
buf = buf[ln+2:]
}
}
p.opCloseBlob(blobHandle)
if p.acceptType == ptype_lazy_send {
p.lazyResponseCount++
} else {
_, _, _, err = p.opResponse()
}
p.resumeBuffer(suspendBuf)
return blob, err
}
func (p *wireProtocol) opConnect(dbName string, user string, password string, options map[string]string, clientPublic *big.Int) {
p.debugPrint("opConnect")
	wire_crypt := true
	if v, err := strconv.ParseBool(options["wire_crypt"]); err == nil {
		wire_crypt = v // only override the default when the option actually parses
	}
protocols := []string{
// PROTOCOL_VERSION, Arch type (Generic=1), min, max, weight
"0000000a00000001000000000000000500000002", // 10, 1, 0, 5, 2
"ffff800b00000001000000000000000500000004", // 11, 1, 0, 5, 4
"ffff800c00000001000000000000000500000006", // 12, 1, 0, 5, 6
"ffff800d00000001000000000000000500000008", // 13, 1, 0, 5, 8
}
p.packInt(op_connect)
p.packInt(op_attach)
p.packInt(3) // CONNECT_VERSION3
p.packInt(1) // Arch type(GENERIC)
p.packString(dbName)
p.packInt(int32(len(protocols)))
p.packBytes(p.uid(strings.ToUpper(user), password, options["auth_plugin_name"], wire_crypt, clientPublic))
buf, _ := hex.DecodeString(strings.Join(protocols, ""))
p.appendBytes(buf)
p.sendPackets()
}
func (p *wireProtocol) opCreate(dbName string, user string, password string, role string) {
p.debugPrint("opCreate")
	page_size := int32(4096)
encode := bytes.NewBufferString("UTF8").Bytes()
userBytes := bytes.NewBufferString(strings.ToUpper(user)).Bytes()
passwordBytes := bytes.NewBufferString(password).Bytes()
roleBytes := []byte(role)
dpb := bytes.Join([][]byte{
[]byte{isc_dpb_version1},
[]byte{isc_dpb_set_db_charset, byte(len(encode))}, encode,
[]byte{isc_dpb_lc_ctype, byte(len(encode))}, encode,
[]byte{isc_dpb_user_name, byte(len(userBytes))}, userBytes,
[]byte{isc_dpb_password, byte(len(passwordBytes))}, passwordBytes,
[]byte{isc_dpb_sql_role_name, byte(len(roleBytes))}, roleBytes,
[]byte{isc_dpb_sql_dialect, 4}, int32_to_bytes(3),
[]byte{isc_dpb_force_write, 4}, bint32_to_bytes(1),
[]byte{isc_dpb_overwrite, 4}, bint32_to_bytes(1),
[]byte{isc_dpb_page_size, 4}, int32_to_bytes(page_size),
}, nil)
if p.authData != nil {
specificAuthData := bytes.NewBufferString(hex.EncodeToString(p.authData)).Bytes()
dpb = bytes.Join([][]byte{
dpb,
[]byte{isc_dpb_specific_auth_data, byte(len(specificAuthData))}, specificAuthData}, nil)
}
if p.timezone != "" {
tznameBytes := []byte(p.timezone)
dpb = bytes.Join([][]byte{
dpb,
[]byte{isc_dpb_session_time_zone, byte(len(tznameBytes))}, tznameBytes}, nil)
}
p.packInt(op_create)
p.packInt(0) // Database Object ID
p.packString(dbName)
p.packBytes(dpb)
p.sendPackets()
}
func (p *wireProtocol) opAttach(dbName string, user string, password string, role string) {
p.debugPrint("opAttach")
encode := bytes.NewBufferString("UTF8").Bytes()
userBytes := bytes.NewBufferString(strings.ToUpper(user)).Bytes()
passwordBytes := bytes.NewBufferString(password).Bytes()
roleBytes := []byte(role)
processName, err := osext.Executable()
var processNameBytes []byte
if err == nil {
if len(processName) > 255 {
//limit process name to last 255 symbols
processName = processName[len(processName)-255:]
}
processNameBytes = bytes.NewBufferString(processName).Bytes()
}
pid := int32(os.Getpid())
dpb := bytes.Join([][]byte{
[]byte{isc_dpb_version1},
[]byte{isc_dpb_sql_dialect, 4}, int32_to_bytes(3),
[]byte{isc_dpb_lc_ctype, byte(len(encode))}, encode,
[]byte{isc_dpb_user_name, byte(len(userBytes))}, userBytes,
[]byte{isc_dpb_password, byte(len(passwordBytes))}, passwordBytes,
[]byte{isc_dpb_sql_role_name, byte(len(roleBytes))}, roleBytes,
[]byte{isc_dpb_process_id, 4}, int32_to_bytes(pid),
[]byte{isc_dpb_process_name, byte(len(processNameBytes))}, processNameBytes,
}, nil)
if p.authData != nil {
specificAuthData := bytes.NewBufferString(hex.EncodeToString(p.authData)).Bytes()
dpb = bytes.Join([][]byte{
dpb,
[]byte{isc_dpb_specific_auth_data, byte(len(specificAuthData))}, specificAuthData}, nil)
}
if p.timezone != "" {
tznameBytes := []byte(p.timezone)
dpb = bytes.Join([][]byte{
dpb,
[]byte{isc_dpb_session_time_zone, byte(len(tznameBytes))}, tznameBytes}, nil)
}
p.packInt(op_attach)
p.packInt(0) // Database Object ID
p.packString(dbName)
p.packBytes(dpb)
p.sendPackets()
}
func (p *wireProtocol) opContAuth(authData []byte, authPluginName string, authPluginList string, keys string) {
p.debugPrint("opContAuth")
p.packInt(op_cont_auth)
p.packString(hex.EncodeToString(authData))
p.packString(authPluginName)
p.packString(authPluginList)
p.packString(keys)
p.sendPackets()
}
func (p *wireProtocol) opDropDatabase() {
p.debugPrint("opDropDatabase")
p.packInt(op_drop_database)
p.packInt(p.dbHandle)
p.sendPackets()
}
func (p *wireProtocol) opTransaction(tpb []byte) {
p.debugPrint("opTransaction")
p.packInt(op_transaction)
p.packInt(p.dbHandle)
p.packBytes(tpb)
p.sendPackets()
}
func (p *wireProtocol) opCommit(transHandle int32) {
p.debugPrint("opCommit():%d", transHandle)
p.packInt(op_commit)
p.packInt(transHandle)
p.sendPackets()
}
func (p *wireProtocol) opCommitRetaining(transHandle int32) {
p.debugPrint("opCommitRetaining():%d", transHandle)
p.packInt(op_commit_retaining)
p.packInt(transHandle)
p.sendPackets()
}
func (p *wireProtocol) opRollback(transHandle int32) {
p.debugPrint("opRollback():%d", transHandle)
p.packInt(op_rollback)
p.packInt(transHandle)
p.sendPackets()
}
func (p *wireProtocol) opRollbackRetaining(transHandle int32) {
p.debugPrint("opRollbackRetaining():%d", transHandle)
p.packInt(op_rollback_retaining)
p.packInt(transHandle)
p.sendPackets()
}
func (p *wireProtocol) opAllocateStatement() {
p.debugPrint("opAllocateStatement")
p.packInt(op_allocate_statement)
p.packInt(p.dbHandle)
p.sendPackets()
}
func (p *wireProtocol) opInfoTransaction(transHandle int32, b []byte) {
p.debugPrint("opInfoTransaction")
p.packInt(op_info_transaction)
p.packInt(transHandle)
p.packInt(0)
p.packBytes(b)
p.packInt(int32(BUFFER_LEN))
p.sendPackets()
}
func (p *wireProtocol) opInfoDatabase(bs []byte) {
p.debugPrint("opInfoDatabase")
p.packInt(op_info_database)
p.packInt(p.dbHandle)
p.packInt(0)
p.packBytes(bs)
p.packInt(int32(BUFFER_LEN))
p.sendPackets()
}
func (p *wireProtocol) opFreeStatement(stmtHandle int32, mode int32) {
p.debugPrint("opFreeStatement:<%v>", stmtHandle)
p.packInt(op_free_statement)
p.packInt(stmtHandle)
p.packInt(mode)
p.sendPackets()
}
func (p *wireProtocol) opPrepareStatement(stmtHandle int32, transHandle int32, query string) {
p.debugPrint("opPrepareStatement():%d,%d,%v", transHandle, stmtHandle, query)
bs := bytes.Join([][]byte{
[]byte{isc_info_sql_stmt_type},
_INFO_SQL_SELECT_DESCRIBE_VARS(),
}, nil)
p.packInt(op_prepare_statement)
p.packInt(transHandle)
p.packInt(stmtHandle)
p.packInt(3) // dialect = 3
p.packString(query)
p.packBytes(bs)
p.packInt(int32(BUFFER_LEN))
p.sendPackets()
}
func (p *wireProtocol) opInfoSql(stmtHandle int32, vars []byte) {
p.debugPrint("opInfoSql")
p.packInt(op_info_sql)
p.packInt(stmtHandle)
p.packInt(0)
p.packBytes(vars)
p.packInt(int32(BUFFER_LEN))
p.sendPackets()
}
func (p *wireProtocol) opExecute(stmtHandle int32, transHandle int32, params []driver.Value) {
p.debugPrint("opExecute():%d,%d,%v", transHandle, stmtHandle, params)
p.packInt(op_execute)
p.packInt(stmtHandle)
p.packInt(transHandle)
if len(params) == 0 {
p.packInt(0) // packBytes([])
p.packInt(0)
p.packInt(0)
p.sendPackets()
} else {
blr, values := p.paramsToBlr(transHandle, params, p.protocolVersion)
p.packBytes(blr)
p.packInt(0)
p.packInt(1)
p.appendBytes(values)
p.sendPackets()
}
}
func (p *wireProtocol) opExecute2(stmtHandle int32, transHandle int32, params []driver.Value, outputBlr []byte) {
p.debugPrint("opExecute2")
p.packInt(op_execute2)
p.packInt(stmtHandle)
p.packInt(transHandle)
if len(params) == 0 {
p.packInt(0) // packBytes([])
p.packInt(0)
p.packInt(0)
} else {
blr, values := p.paramsToBlr(transHandle, params, p.protocolVersion)
p.packBytes(blr)
p.packInt(0)
p.packInt(1)
p.appendBytes(values)
}
p.packBytes(outputBlr)
p.packInt(0)
p.sendPackets()
}
func (p *wireProtocol) opFetch(stmtHandle int32, blr []byte) {
p.debugPrint("opFetch")
p.packInt(op_fetch)
p.packInt(stmtHandle)
p.packBytes(blr)
p.packInt(0)
p.packInt(400)
p.sendPackets()
}
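// opFetchResponse reads op_fetch_response packets and decodes one row at a
// time according to xsqlda; protocol 13 prefixes each row with a NULL bitmap
// instead of a per-value NULL flag.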
func (p *wireProtocol) opFetchResponse(stmtHandle int32, transHandle int32, xsqlda []xSQLVAR) (*list.List, bool, error) {
p.debugPrint("opFetchResponse")
b, err := p.recvPackets(4)
for bytes_to_bint32(b) == op_dummy {
b, _ = p.recvPackets(4)
}
for bytes_to_bint32(b) == op_response && p.lazyResponseCount > 0 {
p.lazyResponseCount--
p._parse_op_response()
b, _ = p.recvPackets(4)
}
if bytes_to_bint32(b) != op_fetch_response {
if bytes_to_bint32(b) == op_response {
_, _, _, err := p._parse_op_response()
if err != nil {
return nil, false, err
}
}
return nil, false, errors.New("opFetchResponse:Internal Error")
}
b, err = p.recvPackets(8)
status := bytes_to_bint32(b[:4])
count := int(bytes_to_bint32(b[4:8]))
rows := list.New()
for count > 0 {
r := make([]driver.Value, len(xsqlda))
if p.protocolVersion < PROTOCOL_VERSION13 {
for i, x := range xsqlda {
var ln int
if x.ioLength() < 0 {
b, err = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
} else {
ln = x.ioLength()
}
raw_value, _ := p.recvPacketsAlignment(ln)
b, err = p.recvPackets(4)
if bytes_to_bint32(b) == 0 { // Not NULL
r[i], err = x.value(raw_value)
}
}
} else { // PROTOCOL_VERSION13
bi256 := big.NewInt(256)
n := len(xsqlda) / 8
if len(xsqlda)%8 != 0 {
n++
}
null_indicator := new(big.Int)
b, _ := p.recvPacketsAlignment(n)
for n = len(b); n > 0; n-- {
null_indicator = null_indicator.Mul(null_indicator, bi256)
bi := big.NewInt(int64(b[n-1]))
null_indicator = null_indicator.Add(null_indicator, bi)
}
for i, x := range xsqlda {
if null_indicator.Bit(i) != 0 {
continue
}
var ln int
if x.ioLength() < 0 {
b, err = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
} else {
ln = x.ioLength()
}
raw_value, _ := p.recvPacketsAlignment(ln)
r[i], err = x.value(raw_value)
}
}
rows.PushBack(r)
b, err = p.recvPackets(12)
// op := int(bytes_to_bint32(b[:4]))
status = bytes_to_bint32(b[4:8])
count = int(bytes_to_bint32(b[8:]))
}
return rows, status != 100, err
}
func (p *wireProtocol) opDetach() {
p.debugPrint("opDetach")
p.packInt(op_detach)
p.packInt(p.dbHandle)
p.sendPackets()
}
func (p *wireProtocol) opOpenBlob(blobId []byte, transHandle int32) {
p.debugPrint("opOpenBlob")
p.packInt(op_open_blob)
p.packInt(transHandle)
p.appendBytes(blobId)
p.sendPackets()
}
func (p *wireProtocol) opCreateBlob2(transHandle int32) {
p.debugPrint("opCreateBlob2")
p.packInt(op_create_blob2)
p.packInt(0)
p.packInt(transHandle)
p.packInt(0)
p.packInt(0)
p.sendPackets()
}
func (p *wireProtocol) opGetSegment(blobHandle int32) {
p.debugPrint("opGetSegment")
p.packInt(op_get_segment)
p.packInt(blobHandle)
p.packInt(int32(BUFFER_LEN))
p.packInt(0)
p.sendPackets()
}
func (p *wireProtocol) opPutSegment(blobHandle int32, seg_data []byte) {
p.debugPrint("opPutSegment")
ln := len(seg_data)
p.packInt(op_put_segment)
p.packInt(blobHandle)
p.packInt(int32(ln))
p.packInt(int32(ln))
p.appendBytes(seg_data)
padding := [3]byte{0x0, 0x0, 0x0}
p.appendBytes(padding[:((4 - ln) & 3)])
p.sendPackets()
}
func (p *wireProtocol) opBatchSegments(blobHandle int32, seg_data []byte) {
p.debugPrint("opBatchSegments")
ln := len(seg_data)
p.packInt(op_batch_segments)
p.packInt(blobHandle)
p.packInt(int32(ln + 2))
p.packInt(int32(ln + 2))
pad_length := ((4 - (ln + 2)) & 3)
padding := make([]byte, pad_length)
p.packBytes([]byte{byte(ln & 255), byte(ln >> 8)}) // little endian int16
p.packBytes(seg_data)
p.packBytes(padding)
p.sendPackets()
}
func (p *wireProtocol) opCloseBlob(blobHandle int32) {
p.debugPrint("opCloseBlob")
p.packInt(op_close_blob)
p.packInt(blobHandle)
p.sendPackets()
}
func (p *wireProtocol) opResponse() (int32, []byte, []byte, error) {
p.debugPrint("opResponse")
b, _ := p.recvPackets(4)
for bytes_to_bint32(b) == op_dummy {
b, _ = p.recvPackets(4)
}
for bytes_to_bint32(b) == op_response && p.lazyResponseCount > 0 {
p.lazyResponseCount--
_, _, _, _ = p._parse_op_response()
b, _ = p.recvPackets(4)
}
if bytes_to_bint32(b) != op_response {
if DEBUG_SRP && bytes_to_bint32(b) == op_cont_auth {
panic("auth error")
}
return 0, nil, nil, fmt.Errorf("error op_response:%d", bytes_to_bint32(b))
}
return p._parse_op_response()
}
func (p *wireProtocol) opSqlResponse(xsqlda []xSQLVAR) ([]driver.Value, error) {
p.debugPrint("opSqlResponse")
b, err := p.recvPackets(4)
for bytes_to_bint32(b) == op_dummy {
b, err = p.recvPackets(4)
}
if bytes_to_bint32(b) != op_sql_response {
return nil, errors.New("Error op_sql_response")
}
b, err = p.recvPackets(4)
count := int(bytes_to_bint32(b))
if count == 0 {
return nil, nil
}
r := make([]driver.Value, len(xsqlda))
var ln int
if p.protocolVersion < PROTOCOL_VERSION13 {
for i, x := range xsqlda {
if x.ioLength() < 0 {
b, err = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
} else {
ln = x.ioLength()
}
raw_value, _ := p.recvPacketsAlignment(ln)
b, err = p.recvPackets(4)
if bytes_to_bint32(b) == 0 { // Not NULL
r[i], err = x.value(raw_value)
}
}
} else { // PROTOCOL_VERSION13
bi256 := big.NewInt(256)
n := len(xsqlda) / 8
if len(xsqlda)%8 != 0 {
n++
}
null_indicator := new(big.Int)
b, _ := p.recvPacketsAlignment(n)
for n = len(b); n > 0; n-- {
null_indicator = null_indicator.Mul(null_indicator, bi256)
bi := big.NewInt(int64(b[n-1]))
null_indicator = null_indicator.Add(null_indicator, bi)
}
for i, x := range xsqlda {
if null_indicator.Bit(i) != 0 {
continue
}
if x.ioLength() < 0 {
b, err = p.recvPackets(4)
ln = int(bytes_to_bint32(b))
} else {
ln = x.ioLength()
}
raw_value, _ := p.recvPacketsAlignment(ln)
r[i], err = x.value(raw_value)
}
}
return r, err
}
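// createBlob writes value as a new blob in BLOB_SEGMENT_SIZE chunks and
// returns the blob id to embed in the parameter values.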
func (p *wireProtocol) createBlob(value []byte, transHandle int32) ([]byte, error) {
buf := p.suspendBuffer()
p.opCreateBlob2(transHandle)
blobHandle, blobId, _, err := p.opResponse()
if err != nil {
p.resumeBuffer(buf)
return blobId, err
}
i := 0
for i < len(value) {
end := i + BLOB_SEGMENT_SIZE
if end > len(value) {
end = len(value)
}
p.opPutSegment(blobHandle, value[i:end])
_, _, _, err := p.opResponse()
if err != nil {
break
}
i += BLOB_SEGMENT_SIZE
}
if err != nil {
p.resumeBuffer(buf)
return blobId, err
}
p.opCloseBlob(blobHandle)
_, _, _, err = p.opResponse()
p.resumeBuffer(buf)
return blobId, err
}
func (p *wireProtocol) paramsToBlr(transHandle int32, params []driver.Value, protocolVersion int32) ([]byte, []byte) {
// Convert parameter array to BLR and values format.
var v, blr []byte
bi256 := big.NewInt(256)
ln := len(params) * 2
blrList := list.New()
valuesList := list.New()
blrList.PushBack([]byte{5, 2, 4, 0, byte(ln & 255), byte(ln >> 8)})
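	// Protocol 13 and later sends the parameters' NULL state up front as a
	// little-endian bitmap padded to a four-byte boundary, instead of a
	// four-byte flag after each value.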
if protocolVersion >= PROTOCOL_VERSION13 {
nullIndicator := new(big.Int)
for i := len(params) - 1; i >= 0; i-- {
if params[i] == nil {
nullIndicator.SetBit(nullIndicator, i, 1)
}
}
n := len(params) / 8
if len(params)%8 != 0 {
n++
}
if n%4 != 0 { // padding
n += 4 - n%4
}
for i := 0; i < n; i++ {
valuesList.PushBack([]byte{byte(nullIndicator.Mod(nullIndicator, bi256).Int64())})
nullIndicator = nullIndicator.Div(nullIndicator, bi256)
}
}
for _, param := range params {
switch f := param.(type) {
case string:
b := str_to_bytes(f)
if len(b) < MAX_CHAR_LENGTH {
blr, v = _bytesToBlr(b)
} else {
v, _ = p.createBlob(b, transHandle)
blr = []byte{9, 0}
}
case int:
blr, v = _int32ToBlr(int32(f))
case int16:
blr, v = _int32ToBlr(int32(f))
case int32:
blr, v = _int32ToBlr(f)
	case int64:
		blr, v = _int32ToBlr(int32(f)) // NOTE: values outside the int32 range are truncated here
case time.Time:
if f.Year() == 0 {
blr, v = _timeToBlr(f)
} else {
blr, v = _timestampToBlr(f)
}
case bool:
if f {
v = []byte{1, 0, 0, 0}
} else {
v = []byte{0, 0, 0, 0}
}
blr = []byte{23}
case nil:
v = []byte{}
blr = []byte{14, 0, 0}
case []byte:
if len(f) < MAX_CHAR_LENGTH {
blr, v = _bytesToBlr(f)
} else {
v, _ = p.createBlob(f, transHandle)
blr = []byte{9, 0}
}
default:
// Can't convert directly; fall back to the string representation.
b := str_to_bytes(fmt.Sprintf("%v", f))
if len(b) < MAX_CHAR_LENGTH {
blr, v = _bytesToBlr(b)
} else {
v, _ = p.createBlob(b, transHandle)
blr = []byte{9, 0}
}
}
valuesList.PushBack(v)
if protocolVersion < PROTOCOL_VERSION13 {
if param == nil {
valuesList.PushBack([]byte{0xff, 0xff, 0xff, 0xff})
} else {
valuesList.PushBack([]byte{0, 0, 0, 0})
}
}
blrList.PushBack(blr)
blrList.PushBack([]byte{7, 0})
}
blrList.PushBack([]byte{255, 76}) // [blr_end, blr_eoc]
blr = flattenBytes(blrList)
v = flattenBytes(valuesList)
return blr, v
}
func (p *wireProtocol) debugPrint(s string, a ...interface{}) {
//if len(a) > 0 {
// s = fmt.Sprintf(s, a...)
//}
//fmt.Printf("[%x] %s\n", uintptr(unsafe.Pointer(p)), s)
}
|
[
"\"USER\"",
"\"USERNAME\""
] |
[] |
[
"USER",
"USERNAME"
] |
[]
|
["USER", "USERNAME"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"flag"
"fmt"
"os"
"strconv"
"github.com/go-redis/redis"
)
func main() {
act := flag.String("act", "producer", "Either: producer or consumer")
	partition := flag.String("partition", "0",
		"Partition to which the consumer will subscribe")
flag.Parse()
fmt.Printf("Welcome to Account service: %s\n\n", *act)
switch *act {
case "producer":
mainProducer()
case "consumer":
		if part32int, err := strconv.ParseInt(*partition, 10, 32); err == nil {
			mainConsumer(int32(part32int))
		} else {
			// Previously a bad partition value was silently ignored.
			fmt.Fprintf(os.Stderr, "invalid partition %q: %v\n", *partition, err)
			os.Exit(1)
		}
}
}
var (
Redis = initRedis()
)
func initRedis() *redis.Client {
	redisURL := os.Getenv("REDIS_URL")
	if redisURL == "" {
		redisURL = "127.0.0.1:6379"
	}
	return redis.NewClient(&redis.Options{
		Addr: redisURL,
Password: "",
DB: 0,
})
}
|
[
"\"REDIS_URL\""
] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
go
| 1 | 0 | |
tests/out_test.go
|
package docker_image_resource_test
import (
"bytes"
"fmt"
"io/ioutil"
"os/exec"
"encoding/json"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("Out", func() {
BeforeEach(func() {
os.Setenv("PATH", "/docker-image-resource/tests/fixtures/bin:"+os.Getenv("PATH"))
os.Setenv("SKIP_PRIVILEGED", "true")
os.Setenv("LOG_FILE", "/dev/stderr")
})
putWithEnv := func(params map[string]interface{}, extraEnv map[string]string) *gexec.Session {
command := exec.Command("/opt/resource/out", "/tmp")
// Get current process environment variables
newEnv := os.Environ()
if extraEnv != nil {
// Append each extra environment variable to new process environment
// variable list
for name, value := range extraEnv {
newEnv = append(newEnv, fmt.Sprintf("%s=%s", name, value))
}
}
command.Env = newEnv
resourceInput, err := json.Marshal(params)
Expect(err).ToNot(HaveOccurred())
command.Stdin = bytes.NewBuffer(resourceInput)
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
<-session.Exited
return session
}
put := func(params map[string]interface{}) *gexec.Session {
return putWithEnv(params, nil)
}
dockerarg := func(cmd string) string {
return "DOCKER ARG: " + cmd
}
docker := func(cmd string) string {
return "DOCKER: " + cmd
}
dockerd := func(cmd string) string {
return "DOCKERD: " + cmd
}
It("starts dockerd with --data-root under /scratch", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
},
})
Expect(session.Err).To(gbytes.Say(dockerd(`.*--data-root /scratch/docker.*`)))
})
Context("when build arguments are provided", func() {
It("passes the arguments correctly to the docker daemon", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
"build_args": map[string]string{
"arg1": "arg with space",
"arg2": "arg with\nnewline",
"arg3": "normal",
},
},
})
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg1=arg with space`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg2=arg with\nnewline`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg3=normal`)))
})
})
Context("when configured with limited up and download", func() {
It("passes them to dockerd", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
"max_concurrent_downloads": 7,
"max_concurrent_uploads": 1,
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
},
})
Expect(session.Err).To(gbytes.Say(dockerd(`.* --max-concurrent-downloads=7 --max-concurrent-uploads=1.*`)))
})
})
Context("when configured with a insecure registries", func() {
It("passes them to dockerd", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
"insecure_registries": []string{"my-registry.gov", "other-registry.biz"},
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
},
})
Expect(session.Err).To(gbytes.Say(dockerd(`.*--insecure-registry my-registry\.gov --insecure-registry other-registry\.biz.*`)))
})
})
Context("when configured with a registry mirror", func() {
It("passes it to dockerd", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
"registry_mirror": "some-mirror",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
},
})
Expect(session.Err).To(gbytes.Say(dockerd(`.*--registry-mirror some-mirror.*`)))
})
})
Context("When using ECR", func() {
It("calls docker pull with the ECR registry", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/ecr",
"dockerfile": "/docker-image-resource/tests/fixtures/ecr/Dockerfile",
},
})
Expect(session.Err).To(gbytes.Say(docker("pull 123123.dkr.ecr.us-west-2.amazonaws.com:443/testing")))
})
It("calls docker pull for an ECR image in a multi build docker file", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/ecr",
"dockerfile": "/docker-image-resource/tests/fixtures/ecr/Dockerfile.multi",
},
})
Expect(session.Err).To(gbytes.Say(docker("pull 123123.dkr.ecr.us-west-2.amazonaws.com:443/testing")))
})
It("calls docker pull for all ECR images in a multi build docker file", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/ecr",
"dockerfile": "/docker-image-resource/tests/fixtures/ecr/Dockerfile.multi-ecr",
},
})
Expect(session.Err).To(gbytes.Say(docker("pull 123123.dkr.ecr.us-west-2.amazonaws.com:443/testing")))
Expect(session.Err).To(gbytes.Say(docker("pull 123123.dkr.ecr.us-west-2.amazonaws.com:443/testing2")))
})
})
Context("When all proxy settings are provided with build args", func() {
It("passes the arguments correctly to the docker daemon", func() {
session := putWithEnv(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
"build_args": map[string]string{
"arg1": "arg with space",
"arg2": "arg with\nnewline",
"arg3": "normal",
},
},
}, map[string]string{
"no_proxy": "10.1.1.1",
"http_proxy": "http://admin:[email protected]:8080",
"https_proxy": "http://another.proxy.net",
})
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`http_proxy=http://admin:[email protected]:8080`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`https_proxy=http://another.proxy.net`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`no_proxy=10.1.1.1`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg1=arg with space`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg2=arg with\nnewline`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`arg3=normal`)))
})
})
Context("When only http_proxy setting is provided, with no build arguments", func() {
It("passes the arguments correctly to the docker daemon", func() {
session := putWithEnv(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
},
}, map[string]string{
"http_proxy": "http://admin:[email protected]:8080",
})
Expect(session.Err).To(gbytes.Say(dockerarg(`--build-arg`)))
Expect(session.Err).To(gbytes.Say(dockerarg(`http_proxy=http://admin:[email protected]:8080`)))
})
})
Context("when load_bases are specified", func() {
BeforeEach(func() {
os.Mkdir("/tmp/expected_base_1", os.ModeDir)
// this image should really be an actual tarball, but the test passes with text. :shrug:
ioutil.WriteFile("/tmp/expected_base_1/image", []byte("some-image-1"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_1/repository", []byte("some-repository-1"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_1/image-id", []byte("some-image-id-1"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_1/tag", []byte("some-tag-1"), os.ModePerm)
os.Mkdir("/tmp/expected_base_2", os.ModeDir)
ioutil.WriteFile("/tmp/expected_base_2/image", []byte("some-image-2"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_2/repository", []byte("some-repository-2"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_2/image-id", []byte("some-image-id-2"), os.ModePerm)
ioutil.WriteFile("/tmp/expected_base_2/tag", []byte("some-tag-2"), os.ModePerm)
os.Mkdir("/tmp/unexpected_base", os.ModeDir)
ioutil.WriteFile("/tmp/unexpected_base/image", []byte("some-image-3"), os.ModePerm)
ioutil.WriteFile("/tmp/unexpected_base/repository", []byte("some-repository-3"), os.ModePerm)
ioutil.WriteFile("/tmp/unexpected_base/image-id", []byte("some-image-id-3"), os.ModePerm)
ioutil.WriteFile("/tmp/unexpected_base/tag", []byte("some-tag-3"), os.ModePerm)
})
AfterEach(func() {
os.RemoveAll("/tmp/expected_base_1")
os.RemoveAll("/tmp/expected_base_2")
os.RemoveAll("/tmp/unexpected_base")
})
It("passes the arguments correctly to the docker daemon", func() {
session := put(map[string]interface{}{
"source": map[string]interface{}{
"repository": "test",
},
"params": map[string]interface{}{
"build": "/docker-image-resource/tests/fixtures/build",
"load_bases": []string{"expected_base_1", "expected_base_2"},
},
})
Expect(session.Err).To(gbytes.Say(docker(`load -i expected_base_1/image`)))
Expect(session.Err).To(gbytes.Say(docker(`tag some-image-id-1 some-repository-1:some-tag-1`)))
Expect(session.Err).To(gbytes.Say(docker(`load -i expected_base_2/image`)))
Expect(session.Err).To(gbytes.Say(docker(`tag some-image-id-2 some-repository-2:some-tag-2`)))
Expect(session.Err).NotTo(gbytes.Say(docker(`load -i unexpected_base/image`)))
Expect(session.Err).NotTo(gbytes.Say(docker(`tag some-image-id-3 some-repository-3:some-tag-3`)))
})
})
})
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/bitrise-io/go-utils/log"
"github.com/bitrise-tools/go-steputils/stepconf"
)
// Config ...
type Config struct {
Debug bool `env:"is_debug_mode,opt[yes,no]"`
// Message
WebhookURL stepconf.Secret `env:"webhook_url"`
WebhookURLOnError stepconf.Secret `env:"webhook_url_on_error"`
APIToken stepconf.Secret `env:"api_token"`
Channel string `env:"channel"`
ChannelOnError string `env:"channel_on_error"`
Text string `env:"text"`
TextOnError string `env:"text_on_error"`
IconEmoji string `env:"emoji"`
IconEmojiOnError string `env:"emoji_on_error"`
IconURL string `env:"icon_url"`
IconURLOnError string `env:"icon_url_on_error"`
LinkNames bool `env:"link_names,opt[yes,no]"`
Username string `env:"from_username"`
UsernameOnError string `env:"from_username_on_error"`
ThreadTs string `env:"thread_ts"`
ThreadTsOnError string `env:"thread_ts_on_error"`
ReplyBroadcast bool `env:"reply_broadcast,opt[yes,no]"`
ReplyBroadcastOnError bool `env:"reply_broadcast_on_error,opt[yes,no]"`
// Attachment
Color string `env:"color,required"`
ColorOnError string `env:"color_on_error"`
PreText string `env:"pretext"`
PreTextOnError string `env:"pretext_on_error"`
AuthorName string `env:"author_name"`
Title string `env:"title"`
TitleOnError string `env:"title_on_error"`
TitleLink string `env:"title_link"`
Message string `env:"message"`
MessageOnError string `env:"message_on_error"`
ImageURL string `env:"image_url"`
ImageURLOnError string `env:"image_url_on_error"`
ThumbURL string `env:"thumb_url"`
ThumbURLOnError string `env:"thumb_url_on_error"`
Footer string `env:"footer"`
FooterIcon string `env:"footer_icon"`
TimeStamp bool `env:"timestamp,opt[yes,no]"`
Fields string `env:"fields"`
Buttons string `env:"buttons"`
}
// success is true if the build is successful, false otherwise.
var success = os.Getenv("BITRISE_BUILD_STATUS") == "0"
// selectValue chooses the right value based on the result of the build.
func selectValue(ifSuccess, ifFailed string) string {
if success || ifFailed == "" {
return ifSuccess
}
return ifFailed
}
// selectBool chooses the right boolean value based on the result of the build.
func selectBool(ifSuccess, ifFailed bool) bool {
if success {
return ifSuccess
}
return ifFailed
}
// ensureNewlines replaces all \n substrings with newline characters.
func ensureNewlines(s string) string {
return strings.Replace(s, "\\n", "\n", -1)
}
func newMessage(c Config) Message {
msg := Message{
Channel: strings.TrimSpace(selectValue(c.Channel, c.ChannelOnError)),
Text: selectValue(c.Text, c.TextOnError),
Attachments: []Attachment{{
Fallback: ensureNewlines(selectValue(c.Message, c.MessageOnError)),
Color: selectValue(c.Color, c.ColorOnError),
PreText: selectValue(c.PreText, c.PreTextOnError),
AuthorName: c.AuthorName,
Title: selectValue(c.Title, c.TitleOnError),
TitleLink: c.TitleLink,
Text: ensureNewlines(selectValue(c.Message, c.MessageOnError)),
Fields: parseFields(c.Fields),
ImageURL: selectValue(c.ImageURL, c.ImageURLOnError),
ThumbURL: selectValue(c.ThumbURL, c.ThumbURLOnError),
Footer: c.Footer,
FooterIcon: c.FooterIcon,
Buttons: parseButtons(c.Buttons),
}},
IconEmoji: selectValue(c.IconEmoji, c.IconEmojiOnError),
IconURL: selectValue(c.IconURL, c.IconURLOnError),
LinkNames: c.LinkNames,
Username: selectValue(c.Username, c.UsernameOnError),
ThreadTs: selectValue(c.ThreadTs, c.ThreadTsOnError),
ReplyBroadcast: selectBool(c.ReplyBroadcast, c.ReplyBroadcastOnError),
}
if c.TimeStamp {
msg.Attachments[0].TimeStamp = int(time.Now().Unix())
}
return msg
}
// postMessage sends a message to a channel.
func postMessage(conf Config, msg Message) (err error) {
b, err := json.Marshal(msg)
if err != nil {
return err
}
log.Debugf("Request to Slack: %s\n", b)
url := strings.TrimSpace(selectValue(string(conf.WebhookURL), string(conf.WebhookURLOnError)))
if url == "" {
url = "https://slack.com/api/chat.postMessage"
}
	req, err := http.NewRequest("POST", url, bytes.NewReader(b))
	if err != nil {
		return fmt.Errorf("failed to create the request: %s", err)
	}
	req.Header.Add("Content-Type", "application/json; charset=utf-8")
if string(conf.APIToken) != "" {
req.Header.Add("Authorization", "Bearer "+string(conf.APIToken))
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to send the request: %s", err)
}
	defer func() {
		// err is the named return value, so a close failure reaches the caller.
		if cerr := resp.Body.Close(); err == nil {
			err = cerr
		}
	}()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("server error: %s, failed to read response: %s", resp.Status, err)
}
return fmt.Errorf("server error: %s, response: %s", resp.Status, body)
}
return nil
}
func validate(conf *Config) error {
if conf.APIToken == "" && conf.WebhookURL == "" {
return fmt.Errorf("Both API Token and WebhookURL are empty. You need to provide one of them. If you want to use incoming webhooks provide the webhook url. If you want to use a bot to send a message provide the bot API token")
}
if conf.APIToken != "" && conf.WebhookURL != "" {
log.Warnf("Both API Token and WebhookURL are provided. Using the API Token")
conf.WebhookURL = ""
}
return nil
}
func main() {
var conf Config
if err := stepconf.Parse(&conf); err != nil {
log.Errorf("Error: %s\n", err)
os.Exit(1)
}
stepconf.Print(conf)
log.SetEnableDebugLog(conf.Debug)
if err := validate(&conf); err != nil {
log.Errorf("Error: %s\n", err)
os.Exit(1)
}
msg := newMessage(conf)
if err := postMessage(conf, msg); err != nil {
log.Errorf("Error: %s", err)
os.Exit(1)
}
log.Donef("\nSlack message successfully sent! 🚀\n")
}
|
[
"\"BITRISE_BUILD_STATUS\""
] |
[] |
[
"BITRISE_BUILD_STATUS"
] |
[]
|
["BITRISE_BUILD_STATUS"]
|
go
| 1 | 0 | |
tests/test_hash.py
|
import datetime
import json
import os
from typing import Union, Dict
from unittest.mock import patch
from datacode import DataPipeline, DataSource, Variable
from tests.pipeline.base import PipelineTest
from tests.utils import INPUT_FILES_PATH
GENERATED_HASH_DIR = os.path.join(INPUT_FILES_PATH, 'hashes')
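# When DATACODE_GENERATE_HASH_TESTS=true, the tests regenerate the stored
# hash fixtures instead of asserting against them.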
SHOULD_GENERATE = os.environ.get('DATACODE_GENERATE_HASH_TESTS', '') == 'true'
def check_or_store_hash_dict(obj_with_hd: Union[DataSource, DataPipeline], obj_name: str, pre_execute: bool = False):
if pre_execute:
hd = obj_with_hd._pre_execute_hash_dict
else:
hd = obj_with_hd.hash_dict()
if SHOULD_GENERATE:
store_hash_dict(hd, obj_name)
else:
check_hash_dict(hd, obj_name)
def check_hash_dict(hd: Dict[str, str], obj_name: str):
static_path = os.path.join(GENERATED_HASH_DIR, f'{obj_name}.json')
with open(static_path, 'r') as f:
expect_hash = json.load(f)
assert hd == expect_hash
def store_hash_dict(hd: Dict[str, str], obj_name: str):
static_path = os.path.join(GENERATED_HASH_DIR, f'{obj_name}.json')
with open(static_path, 'w') as f:
json.dump(hd, f, indent=2)
return
class HashTest(PipelineTest):
pass
class TestSourceHash(HashTest):
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_source(self):
self.create_csv()
ds = self.create_source()
check_or_store_hash_dict(ds, 'source')
df = ds.df
check_or_store_hash_dict(ds, 'source')
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_source_with_calculated_variable(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
d = Variable('d', 'D', calculation=a + b)
ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b, c, d])
dtp = self.create_transformation_pipeline(source=ds, func=lambda source: source)
check_or_store_hash_dict(dtp, 'transform_source_with_calculated')
dtp.execute()
check_or_store_hash_dict(dtp, 'transform_source_with_calculated', pre_execute=True)
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_source_with_repeated_variables_different_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
# First with original variable first, then transformation
load_variables = [
a,
a.add_one_cell(),
b,
c,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
dtp = self.create_transformation_pipeline(source=ds, func=lambda source: source)
check_or_store_hash_dict(dtp, 'transform_source_with_repeated_variables_different_transforms')
dtp.execute()
check_or_store_hash_dict(dtp, 'transform_source_with_repeated_variables_different_transforms', pre_execute=True)
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_source_with_calculated_and_same_calculated_variable_transformed(self):
self.create_csv()
# Try with plain calculated variable first
all_cols = self.create_columns()
a, b, c = self.create_variables()
tran = self.get_transform('cell')
d = Variable('d', 'D', calculation=a + b, available_transforms=[tran])
load_vars = [
a,
b,
c,
d,
d.add_one_cell()
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_vars)
dtp = self.create_transformation_pipeline(source=ds, func=lambda source: source)
check_or_store_hash_dict(dtp, 'transform_source_with_calculated_and_calculated_transformed')
dtp.execute()
check_or_store_hash_dict(dtp, 'transform_source_with_calculated_and_calculated_transformed', pre_execute=True)
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_source_with_calculate_on_transformed_before_and_after_transform(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
d = Variable('d', 'D', calculation=a + b.add_one_cell())
ds = self.create_source(df=None, columns=all_cols, load_variables=[a.add_one_cell(), b.add_one_cell(), c, d])
dtp = self.create_transformation_pipeline(source=ds, func=lambda source: source)
check_or_store_hash_dict(dtp, 'transform_source_with_calculate_on_transformed_before_after')
dtp.execute()
check_or_store_hash_dict(dtp, 'transform_source_with_calculate_on_transformed_before_after', pre_execute=True)
class TestPipelineHash(HashTest):
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_analysis_pipeline(self):
dap = self.create_analysis_pipeline()
check_or_store_hash_dict(dap, 'analysis')
dap.execute()
check_or_store_hash_dict(dap, 'analysis')
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_combine_pipeline(self):
dcp = self.create_combine_pipeline()
check_or_store_hash_dict(dcp, 'combine')
dcp.execute()
check_or_store_hash_dict(dcp, 'combine')
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_generator_pipeline(self):
dgp = self.create_generator_pipeline()
check_or_store_hash_dict(dgp, 'generator')
dgp.execute()
check_or_store_hash_dict(dgp, 'generator')
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_merge_pipeline(self):
dmp = self.create_merge_pipeline()
check_or_store_hash_dict(dmp, 'merge')
dmp.execute()
check_or_store_hash_dict(dmp, 'merge')
@patch('datacode.models.source.DataSource.last_modified', datetime.datetime(2020, 7, 29))
def test_hash_dict_transformation_pipeline(self):
dtp = self.create_transformation_pipeline()
check_or_store_hash_dict(dtp, 'transform')
dtp.execute()
check_or_store_hash_dict(dtp, 'transform')
|
[] |
[] |
[
"DATACODE_GENERATE_HASH_TESTS"
] |
[]
|
["DATACODE_GENERATE_HASH_TESTS"]
|
python
| 1 | 0 | |
cmd/kube-aggregator/main.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"runtime"
"k8s.io/kubernetes/cmd/kube-aggregator/pkg/cmd/server"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
// force compilation of packages we'll later rely upon
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/apis/apiregistration/install"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/apis/apiregistration/validation"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/clientset_generated/internalclientset"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/listers/apiregistration/internalversion"
_ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/listers/apiregistration/v1alpha1"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
cmd := server.NewCommandStartAggregator(os.Stdout, os.Stderr)
cmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := cmd.Execute(); err != nil {
cmdutil.CheckErr(err)
}
}
|
[
"\"GOMAXPROCS\""
] |
[] |
[
"GOMAXPROCS"
] |
[]
|
["GOMAXPROCS"]
|
go
| 1 | 0 | |
go/src/infra/cmd/skylab_swarming_worker/internal/swmbot/info.go
|
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package swmbot provides interaction with the Swarming bot running
// the Skylab worker process. This includes information about the
// Swarming bot as well as any Swarming bot local state.
package swmbot
import (
"fmt"
"os"
"path/filepath"
"infra/cmd/skylab_swarming_worker/internal/lucifer"
)
// Info contains information about the current Swarming bot.
type Info struct {
AdminService string
AutotestPath string
DUTID string
LuciferBinDir string
ParserPath string
SwarmingService string
Task Task
}
// GetInfo returns the Info for the current Swarming bot, built from
// environment variables.
//
// Per-bot variables:
//
// ADMIN_SERVICE: Admin service host, e.g. foo.appspot.com.
// AUTOTEST_DIR: Path to the autotest checkout on server.
// LUCIFER_TOOLS_DIR: Path to the lucifer installation.
// PARSER_PATH: Path to the autotest_status_parser installation.
// SKYLAB_DUT_ID: skylab_inventory id of the DUT that belongs to this bot.
// SWARMING_SERVICE: Swarming service host, e.g. https://foo.appspot.com.
//
// Per-task variables:
//
// SWARMING_TASK_ID: task id of the swarming task being serviced.
func GetInfo() *Info {
return &Info{
AdminService: os.Getenv("ADMIN_SERVICE"),
AutotestPath: os.Getenv("AUTOTEST_DIR"),
DUTID: os.Getenv("SKYLAB_DUT_ID"),
LuciferBinDir: os.Getenv("LUCIFER_TOOLS_DIR"),
ParserPath: os.Getenv("PARSER_PATH"),
SwarmingService: os.Getenv("SWARMING_SERVICE"),
Task: Task{
RunID: os.Getenv("SWARMING_TASK_ID"),
},
}
}
// Task describes the bot's current task.
type Task struct {
RunID string
}
// LuciferConfig returns the lucifer.Config for the Swarming bot.
func (b *Info) LuciferConfig() lucifer.Config {
return lucifer.Config{
AutotestPath: b.AutotestPath,
BinDir: b.LuciferBinDir,
}
}
// ResultsDir returns the path to the results directory used by the bot task.
func (b *Info) ResultsDir() string {
// TODO(pprabhu): Reflect the requesting swarming server URL in the resultdir.
// This will truly disambiguate results between different swarming servers.
return filepath.Join(b.AutotestPath, "results", resultsSubdir(b.Task.RunID))
}
// TaskRunURL returns the URL for the current Swarming task execution.
func (b *Info) TaskRunURL() string {
// TODO(ayatane): Remove this fallback once SWARMING_SERVICE is passed down here.
if b.SwarmingService == "" {
return fmt.Sprintf("https://chromeos-swarming.appspot.com/task?id=%s", b.Task.RunID)
}
return fmt.Sprintf("%s/task?id=%s", b.SwarmingService, b.Task.RunID)
}
// StainlessURL returns the URL to the stainless logs browser for logs offloaded
// from this task.
func (t *Task) StainlessURL() string {
return fmt.Sprintf(
"https://stainless.corp.google.com/browse/chromeos-autotest-results/%s/",
resultsSubdir(t.RunID))
}
func resultsSubdir(runID string) string {
return filepath.Join(fmt.Sprintf("swarming-%s0", runID[:len(runID)-1]), runID[len(runID)-1:])
}
|
[
"\"ADMIN_SERVICE\"",
"\"AUTOTEST_DIR\"",
"\"SKYLAB_DUT_ID\"",
"\"LUCIFER_TOOLS_DIR\"",
"\"PARSER_PATH\"",
"\"SWARMING_SERVICE\"",
"\"SWARMING_TASK_ID\""
] |
[] |
[
"LUCIFER_TOOLS_DIR",
"SKYLAB_DUT_ID",
"ADMIN_SERVICE",
"AUTOTEST_DIR",
"SWARMING_TASK_ID",
"SWARMING_SERVICE",
"PARSER_PATH"
] |
[]
|
["LUCIFER_TOOLS_DIR", "SKYLAB_DUT_ID", "ADMIN_SERVICE", "AUTOTEST_DIR", "SWARMING_TASK_ID", "SWARMING_SERVICE", "PARSER_PATH"]
|
go
| 7 | 0 | |
joshua/process_handling.py
|
"""
process_handling.py
This is a collection of utility functions that are useful for handling
processes. They include the tools necessary to determine which processes
are subprocesses of the current Joshua process by looking at the appropriate
environment variables.
"""
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import re
import signal
import subprocess
import sys
import threading
import time
VAR_NAME = "OF_HOUSE_JOSHUA"
# Create an alarm handler that is used to do timeouts.
class TimeoutError(RuntimeError):
pass
def alarm_handler(*args):
raise TimeoutError
signal.signal(signal.SIGALRM, alarm_handler)
# Determines if there is a running process with a given PID. The PID
# should be given in integer form.
def check_alive(pid):
try:
# Sends signal 0 (which is ignored) to the given process.
os.kill(pid, 0)
return True
except OSError as e:
if e.errno == errno.ESRCH:
# No such process exists. The process is dead.
return False
elif e.errno == errno.EPERM:
# Process exists, but we don't have permission to kill it.
return True
else:
# A more serious error. Propagate the error upwards.
raise e
# Add an environment variable to the given dictionary with
# this process's PID.
def mark_environment(env, pid=str(os.getpid())):
env2 = dict(env)
env2[VAR_NAME] = pid
return env2
# This gets all of the currently running process IDs. They are returned
# as a list of strings.
# NOTE: This only runs on Linux--NOT macOS.
# (There is a library, psutil, that works cross-platform, and
# maybe we should consider going to that at some point, but for now,
# this is sufficient, and it doesn't require downloading more
# open-source software.)
def get_all_process_pids():
pids = os.listdir("/proc")
is_number = re.compile(r"^\d+$")
return list(filter(lambda x: is_number.match(x) is not None, pids))
# Given the PID, this returns the environment of the running process.
def get_environment(pid):
# Make sure the PID is an integer.
if type(pid) is int:
pid = str(pid)
try:
# Read the environment information and convert it into a dictionary.
with open(os.path.join("/proc", pid, "environ"), "rb") as env_file:
env_str = env_file.read()
var_strs = filter(lambda x: len(x) > 0, env_str.split(b"\x00"))
return dict(
map(
lambda var_str: (
var_str[: var_str.find(b"=")],
var_str[var_str.find(b"=") + 1 :],
),
var_strs,
)
)
except IOError:
# This is not our process, so we can't open the file.
return dict()
# Get all child processes by looking for those with the correct
# Joshua ID.
def retrieve_children(pid=str(os.getpid())):
def check(candidate):
env = get_environment(candidate)
return VAR_NAME in env and env[VAR_NAME] == pid
return list(filter(check, get_all_process_pids()))
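# Illustrative sketch (not part of the original module), assuming a Linux
# host with /proc available: mark a child's environment, spawn it, and
# find it again later.
#     env = mark_environment(dict(os.environ))
#     subprocess.Popen(["sleep", "10"], env=env)
#     children = retrieve_children()  # PIDs (as strings) of marked children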
# Waits for the death of a process, but no longer than timeout. It returns
# true if the process ended and false if it timed out or if there was some
# kind of error. An error most likely means the process never existed, but
# it is still reported as a failure rather than a success.
# "Because death could not stop for me -- I kindly stopped for him."
# -- Emily Dickinson
def wait_for_death(pid, timeout=5):
def wait_helper(p):
try:
os.waitpid(p, 0)
except OSError as e:
if e.errno == errno.ECHILD:
# No process exists. Most likely, the process has already exited.
pass
else:
raise e
try:
# Create a thread to wait for the pid to die.
t = threading.Thread(target=wait_helper, args=(pid,))
t.start()
# Actually wait for death, only going as far as timeout.
t.join(timeout=timeout)
# Success.
ret_val = True
except Exception:
# Something bad happened. Assume this failed.
ret_val = False
sys.stdout.write(">")
sys.stdout.flush()
return ret_val
# Kills all the processes spun off from the current process.
def kill_all_children(pid=str(os.getpid())):
child_pids = list(sorted(map(int, retrieve_children(pid))))
if len(child_pids) == 0:
return True
# Send the terminate signal to each.
for child_pid in child_pids:
try:
# Kill, then wait for death for each process.
os.kill(child_pid, signal.SIGKILL)
wait_for_death(child_pid)
except OSError:
# We couldn't kill the current process (possibly
# because it is already dead).
pass
# Because os.waitpid still has issues..
# FIXME: This may actually be unnecessary.
time.sleep(1)
stragglers = len(list(filter(check_alive, child_pids)))
if stragglers > 0:
# Could not kill everything. Raise an error to force restart.
raise OSError("Not all of the child processes could be killed during cleanup.")
# As a final check, retrieve all child PIDs. If anything is left,
# some processes were started after we identified those that were
# to be killed.
new_child_pids = len(retrieve_children(pid))
if new_child_pids > 0:
raise OSError("New processes were begun after children were identified.")
return True
# Check all running subprocesses to see if a zombie was created.
def any_zombies():
out, err = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE).communicate()
if err is not None:
raise OSError(
"Process list information was not successfully retrieved. Error number = "
+ str(err)
)
# Look for the string "<defunct>" in the process listing and return true if anything contains it.
# Ignore any that contain "health_check" as those are currently being injected into the
# environment but are not from us:
# <rdar://problem/42791356> Healthcheck agent is leaving zombie processes visible to application
return list(
filter(
lambda x: "<defunct>" in x and not "health_check" in x,
out.decode("utf-8").split("\n"),
)
)
# UNIT TESTS
import unittest
class TestProcessHandling(unittest.TestCase):
def test_check_alive(self):
# Start long-running process.
process = subprocess.Popen(
["sleep", "100"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
self.assertTrue(check_alive(process.pid))
os.kill(process.pid, signal.SIGKILL)
process.communicate() # Wait for kill
self.assertFalse(check_alive(process.pid))
def test_mark_env(self):
env = mark_environment(dict())
self.assertEquals(os.getpid(), int(env[VAR_NAME]))
def test_get_all_pids(self):
if not sys.platform.startswith("linux"):
self.fail("This platform is not supported.")
else:
pids = get_all_process_pids()
self.assertTrue(len(pids) > 0) # More than 1 running process.
# Each should be a number.
try:
pid_nums = map(int, pids)
except ValueError:
self.fail("Does not return only integers.")
# Each should be a directory in the given file.
for pid in pids:
self.assertTrue(os.path.isdir(os.path.join("/proc", pid)))
# This should contain a number of processes, but this one is a
# good starting point to check.
self.assertTrue(str(os.getpid()) in pids)
def test_get_environment(self):
if not sys.platform.startswith("linux"):
self.fail("This platform is not supported")
else:
# Make sure the environment for this process is the same
# as we know it to be.
env = get_environment(str(os.getpid()))
self.assertEquals(env, os.environ)
env = get_environment(os.getpid())
self.assertEquals(env, os.environ)
def test_retrieve_children(self):
if not sys.platform.startswith("linux"):
self.fail("This platform is not supported")
else:
env = mark_environment(os.environ)
for i in range(10):
subprocess.Popen(
["sleep", "2"],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
pids = retrieve_children()
self.assertEquals(len(pids), 10)
def test_kill_all_children(self):
if not sys.platform.startswith("linux"):
self.fail("This platform is not supported")
else:
env = mark_environment(os.environ)
for i in range(10):
subprocess.Popen(
["sleep", "100"],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
self.assertTrue(kill_all_children())
self.assertEquals(len(retrieve_children()), 0)
def test_wait_for_death(self):
process = subprocess.Popen(
["sleep", "2"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# self.assertFalse(wait_for_death(process.pid, timeout=1))
process = subprocess.Popen(
["sleep", "1"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
self.assertTrue(wait_for_death(process.pid))
def test_any_zombies(self):
self.assertFalse(any_zombies())
# Ideally, this unit test would also have a "false" case, but making a zombie is risky business
# so is probably best avoided.
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/net/bytle/niofs/sftp/TestFileSystemParameters.java
|
package net.bytle.niofs.sftp;
import java.nio.file.Paths;
import java.util.Map;
/**
* Created by gerard on 23-05-2016.
* The URI parameters for the tests.
* They have fixed defaults in the class but can be overridden through the following environment variables:
*
* * BYTLE_NIOFS_SFTP_USER
* * BYTLE_NIOFS_SFTP_PWD
* * BYTLE_NIOFS_SFTP_HOST
* * BYTLE_NIOFS_SFTP_PORT
* * BYTLE_NIOFS_SFTP_WORKING_DIR
* * BYTLE_NIOFS_SFTP_HOME_USER_DIR
*/
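// Illustrative overrides (hypothetical values, not part of the original),
// exported in the shell before running the tests:
//   export BYTLE_NIOFS_SFTP_USER=alice
//   export BYTLE_NIOFS_SFTP_PWD=secret
//   export BYTLE_NIOFS_SFTP_PORT=2222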
public class TestFileSystemParameters {
protected static String USER = "user";
protected static String PWD = "pwd";
protected static String HOST = "localhost";
protected static Integer PORT = MockSshSftpServer.PORT;
protected static String WORKING_DIR = null;
protected static String HOME_USER_DIR = null;
protected static String URL = "sftp://" + TestFileSystemParameters.USER + ":" + TestFileSystemParameters.PWD + "@" + TestFileSystemParameters.HOST + ":" + TestFileSystemParameters.PORT;
static {
Map<String, String> environments = System.getenv();
if (environments.get("BYTLE_NIOFS_SFTP_USER") != null) {
USER = environments.get("BYTLE_NIOFS_SFTP_USER");
PWD = environments.get("BYTLE_NIOFS_SFTP_PWD") != null ? environments.get("BYTLE_NIOFS_SFTP_PWD") : PWD;
HOST = environments.get("BYTLE_NIOFS_SFTP_HOST") != null ? environments.get("BYTLE_NIOFS_SFTP_HOST") : HOST;
PORT = environments.get("BYTLE_NIOFS_SFTP_PORT") != null ? Integer.valueOf(environments.get("BYTLE_NIOFS_SFTP_PORT")) : PORT;
WORKING_DIR = environments.get("BYTLE_NIOFS_SFTP_WORKING_DIR") != null ? environments.get("BYTLE_NIOFS_SFTP_WORKING_DIR") : null;
// The home user dir can be found dynamically but to test the Paths operations, we need to set an absolute path
// and therefore we need to know the home user directory before making a connection.
HOME_USER_DIR = environments.get("BYTLE_NIOFS_SFTP_HOME_USER_DIR") != null ? environments.get("BYTLE_NIOFS_SFTP_HOME_USER_DIR") : "/home/gerardni-niosftp";
URL = "sftp://" + USER + ":" + PWD + "@" + HOST + ":" + PORT;
} else {
HOME_USER_DIR = Paths.get("").toString();
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
src/main/java/org/dimdev/riftloader/Main.java
|
package org.dimdev.riftloader;
import net.minecraft.launchwrapper.Launch;
import org.dimdev.utils.ReflectionUtils;
import javax.swing.*;
import java.io.*;
import java.net.URL;
import java.nio.channels.Channels;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.*;
public class Main {
private static final String[] LIBRARIES = {
"https://www.dimdev.org/maven/org/dimdev/mixin/0.7.11-SNAPSHOT/mixin-0.7.11-SNAPSHOT.jar",
"https://repo1.maven.org/maven2/org/ow2/asm/asm/6.2/asm-6.2.jar",
"https://repo1.maven.org/maven2/org/ow2/asm/asm-commons/6.2/asm-commons-6.2.jar",
"https://repo1.maven.org/maven2/org/ow2/asm/asm-tree/6.2/asm-tree-6.2.jar",
"https://libraries.minecraft.net/net/minecraft/launchwrapper/1.12/launchwrapper-1.12.jar"
};
public static final String VANILLA_SERVER = "https://launcher.mojang.com/v1/objects/3737db93722a9e39eeada7c27e7aca28b144ffa7/server.jar";
// public static final String SPIGOT_SERVER = "https://cdn.getbukkit.org/spigot/spigot-1.13.jar";
public static void main(String[] args) throws Throwable {
if (args.length == 0 || args[0].equals("--install")) {
runClientInstaller();
} else if (args[0].equals("--server")) {
File serverJar = new File("server.jar");
if (!serverJar.isFile()) {
System.out.println("File 'server.jar' does not exist");
System.out.println("Choose which server you'd like to download:");
System.out.println(" 1) Vanilla");
// System.out.println(" 2) Spigot");
System.out.print("Choice: ");
URL url;
String line = new Scanner(System.in).nextLine().toLowerCase();
if (line.startsWith("1") || line.startsWith("v")) {
url = new URL(VANILLA_SERVER);
// } else if (line.startsWith("2") || line.startsWith("s")) {
// url = new URL(SPIGOT_SERVER);
} else {
System.err.println("Not a valid choice");
return;
}
System.out.println("Downloading server jar: " + url);
new FileOutputStream(serverJar).getChannel().transferFrom(Channels.newChannel(url.openStream()), 0, Long.MAX_VALUE);
}
ReflectionUtils.addURLToClasspath(serverJar.toURI().toURL());
for (String url : LIBRARIES) {
ReflectionUtils.addURLToClasspath(getOrDownload(new File("libs"), new URL(url)).toURI().toURL());
}
List<String> argsList = new ArrayList<>(Arrays.asList(args).subList(1, args.length));
argsList.add("--tweakClass");
argsList.add("org.dimdev.riftloader.launch.RiftLoaderServerTweaker");
System.out.println("Launching server...");
Launch.main(argsList.toArray(new String[0]));
}
}
private static File getOrDownload(File directory, URL url) throws IOException {
String urlString = url.toString();
File target = new File(directory, urlString.substring(urlString.lastIndexOf('/') + 1));
if (target.isFile()) {
return target;
}
target.getParentFile().mkdirs();
System.out.println("Downloading library: " + urlString);
new FileOutputStream(target).getChannel().transferFrom(Channels.newChannel(url.openStream()), 0, Long.MAX_VALUE);
return target;
}
public static void runClientInstaller() {
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch (Throwable t) {
t.printStackTrace();
}
try {
File minecraftFolder;
String osName = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (osName.contains("win")) {
minecraftFolder = new File(System.getenv("APPDATA") + "/.minecraft");
} else if (osName.contains("mac")) {
minecraftFolder = new File(System.getProperty("user.home") + "/Library/Application Support/minecraft");
} else {
minecraftFolder = new File(System.getProperty("user.home") + "/.minecraft");
}
// modified: choose path
JFileChooser dlg = new JFileChooser(minecraftFolder);
dlg.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
int res = dlg.showOpenDialog(null);
if(res == JFileChooser.APPROVE_OPTION) {
minecraftFolder = dlg.getSelectedFile();
} else {
JOptionPane.showMessageDialog(null, "to quit", "Rift Installer", JOptionPane.INFORMATION_MESSAGE);
return;
}
// Copy rift jar to libraries
// modified: copy @[email protected]
File versionFolder = new File(minecraftFolder, "versions");
File oldJar = null;
if(versionFolder.exists()) {
for(File f : versionFolder.listFiles()) {
String folder = f.getName();
if(folder.startsWith("1.13.2")) {
File gameJar = new File(f, folder + ".jar");
if(gameJar.isFile()) {
oldJar = gameJar;
break;
}
}
}
}
// Copy the version json
File versionJson = new File(minecraftFolder, "versions/1.13.2-rift-@VERSION@/1.13.2-rift-@[email protected]");
versionJson.getParentFile().mkdirs();
Files.copy(Main.class.getResourceAsStream("/profile.json"), versionJson.toPath(), StandardCopyOption.REPLACE_EXISTING);
// modified: copy @[email protected]
if(oldJar != null) {
File versionJar = new File(minecraftFolder, "versions/1.13.2-rift-@VERSION@/1.13.2-rift-@[email protected]");
//versionJar.getParentFile().mkdirs();
Files.copy(oldJar.toPath(), versionJar.toPath(), StandardCopyOption.REPLACE_EXISTING);
}
// Make mods directory
try {
File modsFolder = new File(minecraftFolder, "mods");
modsFolder.mkdirs();
} catch (Throwable t) {
t.printStackTrace();
}
// Add rift as a profile
try {
File profilesJson = new File(minecraftFolder, "launcher_profiles.json");
if (profilesJson.exists()) { // TODO: use gson instead
String contents = new String(Files.readAllBytes(profilesJson.toPath()));
if (contents.contains("\"rift\"")) {
contents = contents.replaceAll(",\n *\"rift\": \\{[^}]*},", ",");
contents = contents.replaceAll(",?\n *\"rift\": \\{[^}]*},?", "");
}
contents = contents.replace("\n \"profiles\": {", "\n \"profiles\": {\n" +
" \"rift\": {\n" +
" \"name\": \"Rift\",\n" +
" \"type\": \"custom\",\n" +
" \"created\": \"2018-08-13T00:00:00.000Z\",\n" +
" \"lastUsed\": \"2100-01-01T00:00:00.000Z\",\n" +
" \"lastVersionId\": \"1.13.2-rift-@VERSION@\"\n" +
" },");
Files.write(profilesJson.toPath(), contents.getBytes());
}
} catch (Throwable t) {
t.printStackTrace();
}
try {
String source = Main.class.getProtectionDomain().getCodeSource().getLocation().getPath();
if (source.startsWith("/") && osName.contains("win")) {
source = source.substring(1);
}
File riftJar = new File(minecraftFolder, "libraries/org/dimdev/rift/@VERSION@/rift-@[email protected]");
riftJar.getParentFile().mkdirs();
Files.copy(Paths.get(source), riftJar.toPath(), StandardCopyOption.REPLACE_EXISTING);
} catch (Throwable t) {
t.printStackTrace();
}
// modified: copy @[email protected]
String tips = "It seems that you don't have any 1.13.2.jar\n"
+ "You can:\n"
+ " 1. Download `1.13.2.jar`;\n"
+ " 2. Rename it as `1.13.2-rift-@[email protected]`;\n"
+ " 3. Put it into the folder `" + new File(versionFolder, "1.13.2-rift-@VERSION@") + "`\n"
+ "----------------------------------------------------------------------\n";
JOptionPane.showMessageDialog(null,
"Rift @VERSION@ for Minecraft 1.13.2 has been successfully installed at\n" +
"[ " + minecraftFolder.getAbsolutePath() + " ]\n" +
"----------------------------------------------------------------------\n" +
(oldJar == null ? tips : "") +
"It is available in the dropdown menu of the vanilla Minecraft launcher.\n" +
"You'll need to restart the Minecraft Launcher if you had it open when\n" +
"you ran this installer.",
"Rift Installer", JOptionPane.INFORMATION_MESSAGE);
} catch (Throwable t) {
StringWriter w = new StringWriter();
t.printStackTrace(new PrintWriter(w));
JOptionPane.showMessageDialog(null,
"An error occured while installing Rift, please report this to the issue\n" +
"tracker (https://github.com/DimensionalDevelopment/Rift/issues):\n" +
"\n" +
w.toString().replace("\t", " "), "Rift Installer", JOptionPane.ERROR_MESSAGE);
}
}
}
|
[
"\"APPDATA\""
] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
java
| 1 | 0 | |
config.py
|
from pathlib import Path
from dotenv import load_dotenv
import os
#Dotenv Configuration
env_path = Path('.')/'.env'
load_dotenv(dotenv_path=env_path)
class Config:
'''
General configuration class
'''
SECRET_KEY = os.getenv("SECRET_KEY") #secret key for wtf forms
UPLOADED_PHOTOS_DEST = 'app/static/photos' #storage location of uploaded photos in the app
# app email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
class ProdConfig(Config):
'''
Production configuration child class
'''
SQLALCHEMY_DATABASE_URI = 'postgresql://oclczfxlojwown:7848830183b333c34c82252cc505bbef8ad42f6247e849801436ae10736bd189@ec2-34-197-135-44.compute-1.amazonaws.com:5432/d45ffsiu3qs3l2'
class TestConfig(Config):
'''
Test configuration child class
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://toshiba:@localhost/blogs_test'
class DevConfig(Config):
'''
Development configuration child class
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://toshiba:@localhost/blogs'
DEBUG = True
config_options = {
'development':DevConfig,
'production': ProdConfig,
'test':TestConfig
}
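# Illustrative .env layout read by load_dotenv above (placeholder values,
# not part of the original):
#   SECRET_KEY=change-me
#   MAIL_USERNAME=you@example.com
#   MAIL_PASSWORD=app-specific-password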
|
[] |
[] |
[
"MAIL_PASSWORD",
"SECRET_KEY",
"MAIL_USERNAME"
] |
[]
|
["MAIL_PASSWORD", "SECRET_KEY", "MAIL_USERNAME"]
|
python
| 3 | 0 | |
scripts/backup/s3cleanup.py
|
import sys
import os
import boto3
import datetime
import argparse
def cleanup_s3db(args):
s3 = boto3.resource('s3', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'], region_name=os.environ['AWS_S3_REGION_NAME'], endpoint_url=os.environ['AWS_S3_ENDPOINT_URL'], use_ssl=os.environ['AWS_S3_USE_SSL'])
bucket = s3.Bucket(args.bucket)
today = datetime.date.today()
last_month = today - datetime.timedelta(days=30)
prefix = last_month.strftime('%Y/%m')
backups = [o for o in bucket.objects.filter(Prefix=prefix)]
to_delete = backups[0:-1]
client = boto3.client('s3')
deleted = 0
for o in to_delete:
print('Delete object %s' % o)
deleted += 1
o.delete()
if deleted:
print('%d backups deleted' % (deleted))
return 0
def main():
parser = argparse.ArgumentParser(description='Delete old database backups')
parser.add_argument('-b', dest='bucket', action='store', help='bucket name')
parser.add_argument('--force', action='store_true', default=False, help='Force check even if not in production environment')
args = parser.parse_args()
env = os.environ['IMAGE_TAG']
if env != 'prod' and not args.force:
return 0
else:
return cleanup_s3db(args)
if __name__ == '__main__':
sys.exit(main())
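# Illustrative invocation (hypothetical bucket name; the AWS_* and IMAGE_TAG
# variables read above must be present in the environment):
#   IMAGE_TAG=prod python s3cleanup.py -b nightly-db-backups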
|
[] |
[] |
[
"AWS_S3_USE_SSL",
"AWS_SECRET_ACCESS_KEY",
"AWS_S3_ENDPOINT_URL",
"IMAGE_TAG",
"AWS_S3_REGION_NAME",
"AWS_ACCESS_KEY_ID"
] |
[]
|
["AWS_S3_USE_SSL", "AWS_SECRET_ACCESS_KEY", "AWS_S3_ENDPOINT_URL", "IMAGE_TAG", "AWS_S3_REGION_NAME", "AWS_ACCESS_KEY_ID"]
|
python
| 6 | 0 | |
browsersteps_test.go
|
package browsersteps
import (
"encoding/json"
"fmt"
"log"
"net"
"os"
"strconv"
"testing"
"time"
"net/http"
"net/http/httptest"
"net/url"
"github.com/cucumber/godog"
"github.com/tebeka/selenium"
)
func iWaitFor(amount int, unit string) error {
u := time.Second
if strings.HasPrefix(unit, "millisecond") {
u = time.Millisecond
}
fmt.Printf("Waiting for %d %s\n", amount, unit)
time.Sleep(u * time.Duration(amount))
return nil
}
func FeatureContext(s *godog.Suite) {
s.Step(`^I wait for (\d+) (milliseconds|millisecond|seconds|second)$`, iWaitFor)
debug := os.Getenv("DEBUG")
if debug != "" {
val, err := strconv.ParseBool(debug)
if err == nil {
selenium.SetDebug(val)
}
}
capabilities := selenium.Capabilities{"browserName": "chrome"}
capEnv := os.Getenv("SELENIUM_CAPABILITIES")
if capEnv != "" {
err := json.Unmarshal([]byte(capEnv), &capabilities)
if err != nil {
log.Panic(err)
}
}
bs := NewBrowserSteps(s, capabilities, os.Getenv("SELENIUM_URL"))
var server *httptest.Server
s.BeforeSuite(func() {
server = httptest.NewUnstartedServer(http.FileServer(http.Dir("./public")))
listenAddress := os.Getenv("SERVER_LISTEN")
if listenAddress != "" {
var err error
server.Listener, err = net.Listen("tcp4", listenAddress)
if err != nil {
log.Fatal(err)
}
}
server.Start()
u, err := url.Parse(server.URL)
if err != nil {
log.Panic(err.Error())
}
bs.SetBaseURL(u)
})
s.AfterSuite(func() {
if server != nil {
server.Close()
server = nil
}
})
}
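// Illustrative configuration (assumed values, not part of the original):
//   SELENIUM_URL=http://localhost:4444/wd/hub \
//   SELENIUM_CAPABILITIES='{"browserName":"firefox"}' \
//   DEBUG=true go test ./...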
func TestMain(m *testing.M) {
status := godog.Run("browsersteps", FeatureContext)
os.Exit(status)
}
|
[
"\"DEBUG\"",
"\"SELENIUM_CAPABILITIES\"",
"\"SELENIUM_URL\"",
"\"SERVER_LISTEN\""
] |
[] |
[
"SERVER_LISTEN",
"SELENIUM_CAPABILITIES",
"SELENIUM_URL",
"DEBUG"
] |
[]
|
["SERVER_LISTEN", "SELENIUM_CAPABILITIES", "SELENIUM_URL", "DEBUG"]
|
go
| 4 | 0 | |
detectPlate_yolov4-tiny.py
|
# import os
# os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from os import listdir
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('image', './data/kite.jpg', 'path to input image')
flags.DEFINE_string('dir', './car', 'dir to input image')
flags.DEFINE_string('output', 'result.png', 'path to output image')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.25, 'score threshold')
def main(_argv):
config = ConfigProto()
# config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
image_path = FLAGS.image
image_dir = FLAGS.dir
filesNames = listdir(image_dir)
for file in filesNames:
print(image_dir + '/' + file)
original_image = cv2.imread(image_dir + '/' + file)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(original_image, (input_size, input_size))
image_data = image_data / 255.
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32)
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
batch_data = tf.constant(images_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
print(pred_bbox[2])
# if 2 in pred_bbox[2] or 3 in pred_bbox[2] or 5 in pred_bbox[2] or 7 in pred_bbox[2]:
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pkg/cmd/kubernikusctl/auth/init.go
|
package auth
import (
"fmt"
"net/url"
"os"
"strings"
"github.com/gophercloud/gophercloud"
"github.com/howeyc/gopass"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
keyring "github.com/zalando/go-keyring"
"k8s.io/klog"
"github.com/sapcc/kubernikus/pkg/cmd/kubernikusctl/common"
)
type InitOptions struct {
_url string
url *url.URL
name string
kubeconfigPath string
authType string
openstack *common.OpenstackClient
kubernikus *common.KubernikusClient
}
func NewInitCommand() *cobra.Command {
o := &InitOptions{
name: os.Getenv("KUBERNIKUS_NAME"),
_url: os.Getenv("KUBERNIKUS_URL"),
openstack: common.NewOpenstackClient(),
}
c := &cobra.Command{
Use: "init",
Short: "Prepares kubeconfig with Kubernikus credentials",
Run: func(c *cobra.Command, args []string) {
common.SetupLogger()
common.CheckError(o.Validate(c, args))
common.CheckError(o.Complete(args))
common.CheckError(o.Run(c))
},
}
o.BindFlags(c.Flags())
return c
}
func (o *InitOptions) BindFlags(flags *pflag.FlagSet) {
o.openstack.BindFlags(flags)
common.BindLogFlags(flags)
flags.StringVar(&o._url, "url", o._url, "URL for Kubernikus API")
flags.StringVar(&o.name, "name", o.name, "Cluster Name")
flags.StringVar(&o.kubeconfigPath, "kubeconfig", o.kubeconfigPath, "Overwrites kubeconfig auto-detection with explicit path")
flags.StringVar(&o.authType, "auth-type", o.authType, "Authentication type")
}
func (o *InitOptions) Validate(c *cobra.Command, args []string) (err error) {
if o._url != "" {
if o.url, err = url.Parse(o._url); err != nil {
return errors.Errorf("Parsing the Kubernikus URL failed")
}
}
return o.openstack.Validate(c, args)
}
func (o *InitOptions) Complete(args []string) (err error) {
if err := o.openstack.Complete(args); err != nil {
return err
}
return nil
}
func (o *InitOptions) Run(c *cobra.Command) (err error) {
storePasswordInKeyRing := false
if o.openstack.Password == "" && o.openstack.ApplicationCredentialSecret == "" && o.openstack.TokenID == "" {
fmt.Printf("Password: ")
if password, err := gopass.GetPasswdMasked(); err != nil {
return err
} else {
o.openstack.Password = string(password)
storePasswordInKeyRing = true
}
}
if err := o.setup(); err != nil {
if _, ok := errors.Cause(err).(gophercloud.ErrDefault401); o.openstack.Username != "" && ok {
fmt.Println("Deleting password from keyring")
keyring.Delete("kubernikus", strings.ToLower(o.openstack.Username))
}
return err
}
if o.name == "" {
if cluster, err := o.kubernikus.GetDefaultCluster(); err != nil {
return errors.Wrapf(err, "You need to provide --name. Cluster Auto-Detection failed")
} else {
o.name = cluster.Name
klog.V(2).Infof("Detected cluster name: %v", o.name)
}
}
var kubeconfig string
var errCredentials error
if o.authType == "oidc" {
fmt.Printf("Fetching OIDC credentials for %v from %v\n", o.name, o.url)
kubeconfig, errCredentials = o.kubernikus.GetCredentialsOIDC(o.name)
} else {
fmt.Printf("Fetching credentials for %v from %v\n", o.name, o.url)
kubeconfig, errCredentials = o.kubernikus.GetCredentials(o.name)
}
if errCredentials != nil {
return errors.Wrap(errCredentials, "Couldn't fetch credentials from Kubernikus API")
}
if storePasswordInKeyRing {
fmt.Println("Storing password in keyring")
keyring.Set("kubernikus", strings.ToLower(o.openstack.Username), o.openstack.Password)
}
ktx, err := common.NewKubernikusContext(o.kubeconfigPath, "")
if err != nil {
return errors.Wrapf(err, "Failed to load kubeconfig")
}
if err := ktx.MergeAndPersist(kubeconfig); err != nil {
return errors.Wrapf(err, "Couldn't merge existing kubeconfig with fetched credentials")
}
fmt.Printf("Updated kubeconfig at %s\n", ktx.PathOptions.GetDefaultFilename())
return nil
}
func (o *InitOptions) setup() error {
klog.V(2).Infof(o.openstack.PrintDebugAuthInfo())
fmt.Println(o.openstack.PrintAuthInfo())
if err := o.openstack.Authenticate(); err != nil {
return errors.Wrapf(err, "Authentication failed")
}
if o.url == nil {
if url, err := o.openstack.DefaultKubernikusURL(); err != nil {
return errors.Wrapf(err, "You need to provide --url. Auto-Detection failed")
} else {
o.url = url
klog.V(2).Infof("Detected Kubernikus URL: %v", url)
}
}
o.kubernikus = common.NewKubernikusClient(o.url, o.openstack.Provider.TokenID)
return nil
}
|
[
"\"KUBERNIKUS_NAME\"",
"\"KUBERNIKUS_URL\""
] |
[] |
[
"KUBERNIKUS_URL",
"KUBERNIKUS_NAME"
] |
[]
|
["KUBERNIKUS_URL", "KUBERNIKUS_NAME"]
|
go
| 2 | 0 | |
pkg/events/eventing.go
|
package events
/*
Copyright 2019 - 2021 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"encoding/json"
"errors"
"fmt"
"os"
"reflect"
"time"
crunchylog "github.com/crunchydata/postgres-operator/internal/logging"
"github.com/nsqio/go-nsq"
log "github.com/sirupsen/logrus"
)
// Publish sends an event to the NSQ daemon configured via EVENT_ADDR.
func Publish(e EventInterface) error {
// Add logging configuration
crunchylog.CrunchyLogger(crunchylog.SetParameters())
eventAddr := os.Getenv("EVENT_ADDR")
if eventAddr == "" {
return errors.New("EVENT_ADDR not set")
}
if os.Getenv("DISABLE_EVENTING") == "true" {
log.Debugf("eventing disabled")
return nil
}
cfg := nsq.NewConfig()
// cfg.UserAgent = fmt.Sprintf("to_nsq/%s go-nsq/%s", version.Binary, nsq.VERSION)
cfg.UserAgent = fmt.Sprintf("go-nsq/%s", nsq.VERSION)
log.Debugf("publishing %s message %s", reflect.TypeOf(e), e.String())
log.Debugf("header %s ", e.GetHeader().String())
header := e.GetHeader()
header.Timestamp = time.Now()
b, err := json.MarshalIndent(e, "", " ")
if err != nil {
log.Errorf("Error: %s", err)
return err
}
log.Debug(string(b))
var producer *nsq.Producer
producer, err = nsq.NewProducer(eventAddr, cfg)
if err != nil {
log.Errorf("Error: %s", err)
return err
}
topics := e.GetHeader().Topic
if len(topics) == 0 {
log.Errorf("Error: topics list is empty and is required to publish")
return errors.New("topics list is empty and is required to publish")
}
for i := 0; i < len(topics); i++ {
err = producer.Publish(topics[i], b)
if err != nil {
log.Errorf("Error: %s", err)
return err
}
}
// always publish to the All topic
err = producer.Publish(EventTopicAll, b)
if err != nil {
log.Errorf("Error: %s", err)
return err
}
return nil
}
|
[
"\"EVENT_ADDR\"",
"\"DISABLE_EVENTING\""
] |
[] |
[
"EVENT_ADDR",
"DISABLE_EVENTING"
] |
[]
|
["EVENT_ADDR", "DISABLE_EVENTING"]
|
go
| 2 | 0 | |
pills/legacy/edit.go
|
package main
import (
"errors"
"fmt"
"os"
"os/exec"
)
func startEditor(path string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
return errors.New("empty env editor")
}
cmd := exec.Command(editor, path)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("run: %v", err)
}
return nil
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
airflow/providers/google/cloud/example_dags/example_cloud_build.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that displays interactions with Google Cloud Build.
This DAG relies on the following OS environment variables:
* GCP_PROJECT_ID - Google Cloud Project to use for the Cloud Function.
* GCP_CLOUD_BUILD_ARCHIVE_URL - Path to the zipped source in Google Cloud Storage.
This object must be a gzipped archive file (.tar.gz) containing source to build.
* GCP_CLOUD_BUILD_REPOSITORY_NAME - Name of the Cloud Source Repository.
"""
import os
from datetime import datetime
from pathlib import Path
from typing import Any, Dict
import yaml
from future.backports.urllib.parse import urlparse
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.cloud_build import (
CloudBuildCancelBuildOperator,
CloudBuildCreateBuildOperator,
CloudBuildCreateBuildTriggerOperator,
CloudBuildDeleteBuildTriggerOperator,
CloudBuildGetBuildOperator,
CloudBuildGetBuildTriggerOperator,
CloudBuildListBuildsOperator,
CloudBuildListBuildTriggersOperator,
CloudBuildRetryBuildOperator,
CloudBuildRunBuildTriggerOperator,
CloudBuildUpdateBuildTriggerOperator,
)
START_DATE = datetime(2021, 1, 1)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "airflow-test-project")
GCP_SOURCE_ARCHIVE_URL = os.environ.get("GCP_CLOUD_BUILD_ARCHIVE_URL", "gs://airflow-test-bucket/file.tar.gz")
GCP_SOURCE_REPOSITORY_NAME = os.environ.get("GCP_CLOUD_BUILD_REPOSITORY_NAME", "airflow-test-repo")
GCP_SOURCE_ARCHIVE_URL_PARTS = urlparse(GCP_SOURCE_ARCHIVE_URL)
GCP_SOURCE_BUCKET_NAME = GCP_SOURCE_ARCHIVE_URL_PARTS.netloc
CURRENT_FOLDER = Path(__file__).parent
# [START howto_operator_gcp_create_build_trigger_body]
create_build_trigger_body = {
"name": "test-cloud-build-trigger",
"trigger_template": {
"project_id": GCP_PROJECT_ID,
"repo_name": GCP_SOURCE_REPOSITORY_NAME,
"branch_name": "master",
},
"filename": "cloudbuild.yaml",
}
# [END howto_operator_gcp_create_build_trigger_body]
update_build_trigger_body = {
"name": "test-cloud-build-trigger",
"trigger_template": {
"project_id": GCP_PROJECT_ID,
"repo_name": GCP_SOURCE_REPOSITORY_NAME,
"branch_name": "dev",
},
"filename": "cloudbuild.yaml",
}
# [START howto_operator_gcp_create_build_from_storage_body]
create_build_from_storage_body = {
"source": {"storage_source": GCP_SOURCE_ARCHIVE_URL},
"steps": [
{
"name": "gcr.io/cloud-builders/docker",
"args": ["build", "-t", f"gcr.io/$PROJECT_ID/{GCP_SOURCE_BUCKET_NAME}", "."],
}
],
"images": [f"gcr.io/$PROJECT_ID/{GCP_SOURCE_BUCKET_NAME}"],
}
# [END howto_operator_gcp_create_build_from_storage_body]
# [START howto_operator_create_build_from_repo_body]
create_build_from_repo_body: Dict[str, Any] = {
"source": {"repo_source": {"repo_name": GCP_SOURCE_REPOSITORY_NAME, "branch_name": "main"}},
"steps": [
{
"name": "gcr.io/cloud-builders/docker",
"args": ["build", "-t", "gcr.io/$PROJECT_ID/$REPO_NAME", "."],
}
],
"images": ["gcr.io/$PROJECT_ID/$REPO_NAME"],
}
# [END howto_operator_create_build_from_repo_body]
with models.DAG(
"example_gcp_cloud_build",
schedule_interval='@once',
start_date=START_DATE,
catchup=False,
tags=["example"],
) as build_dag:
# [START howto_operator_create_build_from_storage]
create_build_from_storage = CloudBuildCreateBuildOperator(
task_id="create_build_from_storage", project_id=GCP_PROJECT_ID, build=create_build_from_storage_body
)
# [END howto_operator_create_build_from_storage]
# [START howto_operator_create_build_from_storage_result]
create_build_from_storage_result = BashOperator(
bash_command=f"echo { create_build_from_storage.output['results'] }",
task_id="create_build_from_storage_result",
)
# [END howto_operator_create_build_from_storage_result]
# [START howto_operator_create_build_from_repo]
create_build_from_repo = CloudBuildCreateBuildOperator(
task_id="create_build_from_repo", project_id=GCP_PROJECT_ID, build=create_build_from_repo_body
)
# [END howto_operator_create_build_from_repo]
# [START howto_operator_create_build_from_repo_result]
create_build_from_repo_result = BashOperator(
bash_command=f"echo { create_build_from_repo.output['results'] }",
task_id="create_build_from_repo_result",
)
# [END howto_operator_create_build_from_repo_result]
# [START howto_operator_list_builds]
list_builds = CloudBuildListBuildsOperator(
task_id="list_builds", project_id=GCP_PROJECT_ID, location="global"
)
# [END howto_operator_list_builds]
# [START howto_operator_create_build_without_wait]
create_build_without_wait = CloudBuildCreateBuildOperator(
task_id="create_build_without_wait",
project_id=GCP_PROJECT_ID,
build=create_build_from_repo_body,
wait=False,
)
# [END howto_operator_create_build_without_wait]
# [START howto_operator_cancel_build]
cancel_build = CloudBuildCancelBuildOperator(
task_id="cancel_build",
id_=create_build_without_wait.output['id'],
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_cancel_build]
# [START howto_operator_retry_build]
retry_build = CloudBuildRetryBuildOperator(
task_id="retry_build",
id_=cancel_build.output['id'],
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_retry_build]
# [START howto_operator_get_build]
get_build = CloudBuildGetBuildOperator(
task_id="get_build",
id_=retry_build.output['id'],
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_get_build]
# [START howto_operator_gcp_create_build_from_yaml_body]
create_build_from_file = CloudBuildCreateBuildOperator(
task_id="create_build_from_file",
project_id=GCP_PROJECT_ID,
build=yaml.safe_load((Path(CURRENT_FOLDER) / 'example_cloud_build.yaml').read_text()),
params={'name': 'Airflow'},
)
# [END howto_operator_gcp_create_build_from_yaml_body]
create_build_from_storage >> create_build_from_storage_result
create_build_from_storage_result >> list_builds
create_build_from_repo >> create_build_from_repo_result
create_build_from_repo_result >> list_builds
list_builds >> create_build_without_wait >> cancel_build
cancel_build >> retry_build >> get_build
with models.DAG(
"example_gcp_cloud_build_trigger",
schedule_interval='@once',
start_date=START_DATE,
catchup=False,
tags=["example"],
) as build_trigger_dag:
# [START howto_operator_create_build_trigger]
create_build_trigger = CloudBuildCreateBuildTriggerOperator(
task_id="create_build_trigger", project_id=GCP_PROJECT_ID, trigger=create_build_trigger_body
)
# [END howto_operator_create_build_trigger]
# [START howto_operator_run_build_trigger]
run_build_trigger = CloudBuildRunBuildTriggerOperator(
task_id="run_build_trigger",
project_id=GCP_PROJECT_ID,
trigger_id=create_build_trigger.output['id'],
source=create_build_from_repo_body['source']['repo_source'],
)
# [END howto_operator_run_build_trigger]
# [START howto_operator_create_build_trigger]
update_build_trigger = CloudBuildUpdateBuildTriggerOperator(
task_id="update_build_trigger",
project_id=GCP_PROJECT_ID,
trigger_id=create_build_trigger.output['id'],
trigger=update_build_trigger_body,
)
# [END howto_operator_create_build_trigger]
# [START howto_operator_get_build_trigger]
get_build_trigger = CloudBuildGetBuildTriggerOperator(
task_id="get_build_trigger",
project_id=GCP_PROJECT_ID,
trigger_id=create_build_trigger.output['id'],
)
# [END howto_operator_get_build_trigger]
# [START howto_operator_delete_build_trigger]
delete_build_trigger = CloudBuildDeleteBuildTriggerOperator(
task_id="delete_build_trigger",
project_id=GCP_PROJECT_ID,
trigger_id=create_build_trigger.output['id'],
)
# [END howto_operator_delete_build_trigger]
# [START howto_operator_list_build_triggers]
list_build_triggers = CloudBuildListBuildTriggersOperator(
task_id="list_build_triggers", project_id=GCP_PROJECT_ID, location="global", page_size=5
)
# [END howto_operator_list_build_triggers]
chain(
create_build_trigger,
run_build_trigger,
update_build_trigger,
get_build_trigger,
delete_build_trigger,
list_build_triggers,
)
|
[] |
[] |
[
"GCP_PROJECT_ID",
"GCP_CLOUD_BUILD_REPOSITORY_NAME",
"GCP_CLOUD_BUILD_ARCHIVE_URL"
] |
[]
|
["GCP_PROJECT_ID", "GCP_CLOUD_BUILD_REPOSITORY_NAME", "GCP_CLOUD_BUILD_ARCHIVE_URL"]
|
python
| 3 | 0 | |
historyarchive/archive_test.go
|
// Copyright 2016 DigitalBits Development Foundation and contributors. Licensed
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
package historyarchive
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"math/big"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/xdbfoundation/go/xdr"
)
func GetTestS3Archive() *Archive {
mx := big.NewInt(0xffffffff)
r, e := rand.Int(rand.Reader, mx)
if e != nil {
panic(e)
}
bucket := fmt.Sprintf("s3://history-stg.digitalbits.org/dev/archivist/test-%s", r)
region := "eu-west-1"
if env_bucket := os.Getenv("ARCHIVIST_TEST_S3_BUCKET"); env_bucket != "" {
bucket = fmt.Sprintf(env_bucket+"/archivist/test-%s", r)
}
if env_region := os.Getenv("ARCHIVIST_TEST_S3_REGION"); env_region != "" {
region = env_region
}
return MustConnect(bucket, ConnectOptions{S3Region: region, CheckpointFrequency: 64})
}
func GetTestMockArchive() *Archive {
return MustConnect("mock://test", ConnectOptions{CheckpointFrequency: 64})
}
var tmpdirs []string
func GetTestFileArchive() *Archive {
d, e := ioutil.TempDir("/tmp", "archivist")
if e != nil {
panic(e)
}
if tmpdirs == nil {
tmpdirs = []string{d}
} else {
tmpdirs = append(tmpdirs, d)
}
return MustConnect("file://"+d, ConnectOptions{CheckpointFrequency: 64})
}
func cleanup() {
for _, d := range tmpdirs {
os.RemoveAll(d)
}
}
func GetTestArchive() *Archive {
ty := os.Getenv("ARCHIVIST_TEST_TYPE")
if ty == "file" {
return GetTestFileArchive()
} else if ty == "s3" {
return GetTestS3Archive()
} else {
return GetTestMockArchive()
}
}
func (arch *Archive) AddRandomBucket() (Hash, error) {
var h Hash
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return h, e
}
h = sha256.Sum256(buf)
pth := BucketPath(h)
e = arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
return h, e
}
func (arch *Archive) AddRandomCheckpointFile(cat string, chk uint32) error {
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return e
}
pth := CategoryCheckpointPath(cat, chk)
return arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
}
func (arch *Archive) AddRandomCheckpoint(chk uint32) error {
opts := &CommandOptions{Force: true}
for _, cat := range Categories() {
if cat == "history" {
var has HistoryArchiveState
has.CurrentLedger = chk
for i := 0; i < NumLevels; i++ {
curr, e := arch.AddRandomBucket()
if e != nil {
return e
}
snap, e := arch.AddRandomBucket()
if e != nil {
return e
}
next, e := arch.AddRandomBucket()
if e != nil {
return e
}
has.CurrentBuckets[i].Curr = curr.String()
has.CurrentBuckets[i].Snap = snap.String()
has.CurrentBuckets[i].Next.Output = next.String()
}
arch.PutCheckpointHAS(chk, has, opts)
arch.PutRootHAS(has, opts)
} else {
arch.AddRandomCheckpointFile(cat, chk)
}
}
return nil
}
func (arch *Archive) PopulateRandomRange(rng Range) error {
for chk := range rng.GenerateCheckpoints(arch.checkpointManager) {
if e := arch.AddRandomCheckpoint(chk); e != nil {
return e
}
}
return nil
}
func testRange() Range {
return Range{Low: 63, High: 0x3bf}
}
func testOptions() *CommandOptions {
return &CommandOptions{Range: testRange(), Concurrency: 16}
}
func GetRandomPopulatedArchive() *Archive {
a := GetTestArchive()
a.PopulateRandomRange(testRange())
return a
}
func TestScan(t *testing.T) {
defer cleanup()
opts := testOptions()
GetRandomPopulatedArchive().Scan(opts)
}
func TestScanSize(t *testing.T) {
defer cleanup()
opts := testOptions()
arch := GetRandomPopulatedArchive()
arch.Scan(opts)
assert.Equal(t, opts.Range.SizeInCheckPoints(arch.checkpointManager),
len(arch.checkpointFiles["history"]))
}
func TestScanSizeSubrange(t *testing.T) {
defer cleanup()
opts := testOptions()
arch := GetRandomPopulatedArchive()
opts.Range.Low = arch.checkpointManager.NextCheckpoint(opts.Range.Low)
opts.Range.High = arch.checkpointManager.PrevCheckpoint(opts.Range.High)
arch.Scan(opts)
assert.Equal(t, opts.Range.SizeInCheckPoints(arch.checkpointManager),
len(arch.checkpointFiles["history"]))
}
func TestScanSizeSubrangeFewBuckets(t *testing.T) {
defer cleanup()
opts := testOptions()
arch := GetRandomPopulatedArchive()
opts.Range.Low = 0x1ff
opts.Range.High = 0x1ff
arch.Scan(opts)
// We should only scan one checkpoint worth of buckets.
assert.Less(t, len(arch.allBuckets), 40)
}
func TestScanSizeSubrangeAllBuckets(t *testing.T) {
defer cleanup()
opts := testOptions()
arch := GetRandomPopulatedArchive()
arch.Scan(opts)
// We should scan all checkpoints worth of buckets.
assert.Less(t, 300, len(arch.allBuckets))
}
func countMissing(arch *Archive, opts *CommandOptions) int {
n := 0
arch.Scan(opts)
for _, missing := range arch.CheckCheckpointFilesMissing(opts) {
n += len(missing)
}
n += len(arch.CheckBucketsMissing())
return n
}
func TestMirror(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
func copyFile(category string, checkpoint uint32, src *Archive, dst *Archive) {
pth := CategoryCheckpointPath(category, checkpoint)
rdr, err := src.backend.GetFile(pth)
if err != nil {
panic(err)
}
if err = dst.backend.PutFile(pth, rdr); err != nil {
panic(err)
}
}
func TestMirrorThenRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.SizeInCheckPoints(src.checkpointManager)/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
Repair(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
func (a *Archive) MustGetRootHAS() HistoryArchiveState {
has, e := a.GetRootHAS()
if e != nil {
panic("failed to get root HAS")
}
return has
}
func TestMirrorSubsetDoPointerUpdate(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
oldHigh := opts.Range.High
assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger)
opts.Range.High = src.checkpointManager.NextCheckpoint(oldHigh)
src.AddRandomCheckpoint(opts.Range.High)
Mirror(src, dst, opts)
assert.Equal(t, opts.Range.High, dst.MustGetRootHAS().CurrentLedger)
}
func TestMirrorSubsetNoPointerUpdate(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
oldHigh := opts.Range.High
assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger)
src.AddRandomCheckpoint(src.checkpointManager.NextCheckpoint(oldHigh))
opts.Range.Low = 0x7f
opts.Range.High = 0xff
Mirror(src, dst, opts)
assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger)
}
func TestDryRunNoRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.SizeInCheckPoints(src.checkpointManager)/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
opts.DryRun = true
Repair(src, dst, opts)
assert.NotEqual(t, 0, countMissing(dst, opts))
}
func TestNetworkPassphrase(t *testing.T) {
makeHASReader := func() io.ReadCloser {
return ioutil.NopCloser(strings.NewReader(`
{
"version": 1,
"server": "v14.1.0rc2",
"currentLedger": 31883135,
"networkPassphrase": "LiveNet Global DigitalBits Network ; February 2021"
}`))
}
makeHASReaderNoNetwork := func() io.ReadCloser {
return ioutil.NopCloser(strings.NewReader(`
{
"version": 1,
"server": "v14.1.0rc2",
"currentLedger": 31883135
}`))
}
// No network passphrase set in options
archive := MustConnect("mock://test", ConnectOptions{CheckpointFrequency: 64})
err := archive.backend.PutFile("has.json", makeHASReader())
assert.NoError(t, err)
_, err = archive.GetPathHAS("has.json")
assert.NoError(t, err)
// No network passphrase set in HAS
archive = MustConnect("mock://test", ConnectOptions{
NetworkPassphrase: "LiveNet Global DigitalBits Network ; February 2021",
CheckpointFrequency: 64,
})
err = archive.backend.PutFile("has.json", makeHASReaderNoNetwork())
assert.NoError(t, err)
_, err = archive.GetPathHAS("has.json")
assert.NoError(t, err)
// Correct network passphrase set in options
archive = MustConnect("mock://test", ConnectOptions{
NetworkPassphrase: "LiveNet Global DigitalBits Network ; February 2021",
CheckpointFrequency: 64,
})
err = archive.backend.PutFile("has.json", makeHASReader())
assert.NoError(t, err)
_, err = archive.GetPathHAS("has.json")
assert.NoError(t, err)
// Incorrect network passphrase set in options
archive = MustConnect("mock://test", ConnectOptions{
NetworkPassphrase: "TestNet Global DigitalBits Network ; December 2020",
CheckpointFrequency: 64,
})
err = archive.backend.PutFile("has.json", makeHASReader())
assert.NoError(t, err)
_, err = archive.GetPathHAS("has.json")
assert.EqualError(t, err, "Network passphrase does not match! expected=TestNet Global DigitalBits Network ; December 2020 actual=LiveNet Global DigitalBits Network ; February 2021")
}
func TestXdrDecode(t *testing.T) {
xdrbytes := []byte{
0, 0, 0, 0, // entry type 0, liveentry
0, 32, 223, 100, // lastmodified 2154340
0, 0, 0, 0, // entry type 0, account
0, 0, 0, 0, // key type 0
23, 140, 68, 253, // ed25519 key (32 bytes)
184, 162, 186, 195,
118, 239, 158, 210,
100, 241, 174, 254,
108, 110, 165, 140,
75, 76, 83, 141,
104, 212, 227, 80,
1, 214, 157, 7,
0, 0, 0, 29, // 64bit balance: 125339976000
46, 216, 65, 64,
0, 0, 129, 170, // 64bit seqnum: 142567144423475
0, 0, 0, 51,
0, 0, 0, 1, // numsubentries: 1
0, 0, 0, 1, // inflationdest type, populated
0, 0, 0, 0, // key type 0
87, 240, 19, 71, // ed25519 key (32 bytes)
52, 91, 9, 62,
213, 239, 178, 85,
161, 119, 108, 251,
168, 90, 76, 116,
12, 48, 134, 248,
115, 255, 117, 50,
19, 18, 170, 203,
0, 0, 0, 0, // flags
0, 0, 0, 19, // homedomain: 19 bytes + 1 null padding
99, 101, 110, 116, // "centaurus.xcoins.de"
97, 117, 114, 117,
115, 46, 120, 99,
111, 105, 110, 115,
46, 100, 101, 0,
1, 0, 0, 0, // thresholds
0, 0, 0, 0, // signers (null)
0, 0, 0, 0, // entry.account.ext.v: 0
0, 0, 0, 0, // entry.ext.v: 0
}
assert.Equal(t, len(xdrbytes), 152)
var tmp xdr.BucketEntry
n, err := xdr.Unmarshal(bytes.NewReader(xdrbytes[:]), &tmp)
fmt.Printf("Decoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, len(xdrbytes), n)
var out bytes.Buffer
n, err = xdr.Marshal(&out, &tmp)
fmt.Printf("Encoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, out.Len(), n)
assert.Equal(t, out.Bytes(), xdrbytes)
}
|
[
"\"ARCHIVIST_TEST_S3_BUCKET\"",
"\"ARCHIVIST_TEST_S3_REGION\"",
"\"ARCHIVIST_TEST_TYPE\""
] |
[] |
[
"ARCHIVIST_TEST_S3_REGION",
"ARCHIVIST_TEST_TYPE",
"ARCHIVIST_TEST_S3_BUCKET"
] |
[]
|
["ARCHIVIST_TEST_S3_REGION", "ARCHIVIST_TEST_TYPE", "ARCHIVIST_TEST_S3_BUCKET"]
|
go
| 3 | 0 | |
app/server.py
|
import json
import os
import random
import bottle
from bottle import HTTPResponse
@bottle.route("/")
def index():
return "Your Battlesnake is alive! hahahahaa"
@bottle.post("/ping")
def ping():
"""
Used by the Battlesnake Engine to make sure your snake is still working.
"""
return HTTPResponse(status=200)
@bottle.post("/start")
def start():
"""
Called every time a new Battlesnake game starts and your snake is in it.
Your response will control how your snake is displayed on the board.
"""
data = bottle.request.json
print("START:", json.dumps(data))
response = {"color": "#00FF00", "headType": "regular", "tailType": "regular"}
return HTTPResponse(
status=200,
headers={"Content-Type": "application/json"},
body=json.dumps(response),
)
@bottle.post("/move")
def move():
"""
Called when the Battlesnake Engine needs to know your next move.
The data parameter will contain information about the board.
Your response must include your move of up, down, left, or right.
"""
data = bottle.request.json
print("MOVE:", json.dumps(data))
# Choose a random direction to move in
directions = ["up", "down", "left", "right"]
move = random.choice(directions)
# Shouts are messages sent to all the other snakes in the game.
# Shouts are not displayed on the game board.
shout = "I am a python snake!"
response = {"move": move, "shout": shout}
return HTTPResponse(
status=200,
headers={"Content-Type": "application/json"},
body=json.dumps(response),
)
@bottle.post("/end")
def end():
"""
Called every time a game with your snake in it ends.
"""
data = bottle.request.json
print("END:", json.dumps(data))
return HTTPResponse(status=200)
def main():
bottle.run(
application,
host=os.getenv("IP", "0.0.0.0"),
port=os.getenv("PORT", "8080"),
debug=os.getenv("DEBUG", True),
)
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
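# Usage sketch (assumed, not from the original file): with this module saved
# as app/server.py, something like `gunicorn app.server:application` would
# serve the exposed WSGI app; the exact invocation depends on your layout.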
if __name__ == "__main__":
main()
|
[] |
[] |
[
"PORT",
"IP",
"DEBUG"
] |
[]
|
["PORT", "IP", "DEBUG"]
|
python
| 3 | 0 | |
bottle.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2015, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ########################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to main() is at the end of the file.
def _cli_parse(args):
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] package.module:app")
opt = parser.add_option
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opts, args = parser.parse_args(args[1:])
return opts, args, parser
def _cli_patch(args):
opts, _, _ = _cli_parse(args)
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ###########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec was deprecated and later removed in Python 3, so use the
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
py3k = sys.version_info.major > 2
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
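# Illustrative example (a sketch, not from the original source):
# _re_flatten(r'/(?P<id>\d+)') returns '/(?:\d+)', while an escaped paren
# such as r'\(' is left untouched.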
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
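    # Illustrative sketch (not from the original source): for the rule
    # '/wiki/<page:re:[a-z]+>' this yields ('/wiki/', None, None), then
    # ('page', 're', '[a-z]+'), and finally a trailing empty literal token.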
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
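    # Illustrative sketch (not from the original source): after
    # add('/wiki/<page>', 'GET', target, name='wiki'), build('wiki',
    # page='Home') returns '/wiki/Home'; leftover keyword arguments are
    # appended as the query string.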
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
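# Illustrative sketch (not from the original source): for the rule
# '/wiki/<page>', match({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/wiki/Home'})
# returns (target, {'page': 'Home'}).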
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._add_change_listener(functools.partial(self.trigger_hook, 'config'))
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
(:class:`Bottle` children only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
def _inner_handle():
# Maybe pass variables as locals for better performance?
try:
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return _inner_handle()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
try:
out = None
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
except HTTPResponse:
return _e()
out = _inner_handle()
return out
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
self.trigger_hook('after_request')
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
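    # Illustrative sketch (not from the original source): a cookie written
    # with response.set_cookie('user', 'alice', secret='s3cr3t') is read back
    # here as request.get_cookie('user', secret='s3cr3t'); a missing cookie
    # or a bad signature yields the default instead.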
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
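    # Note (added for clarity): each chunk arrives as
    # '<hex-size>[;extension]\r\n<data>\r\n'; a zero-size chunk terminates the
    # body, and any framing violation raises the 400 error prepared above.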
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
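    # Illustrative sketch (not from the original source): with
    # SCRIPT_NAME='/app' and PATH_INFO='/hello/world', path_shift(1) yields
    # SCRIPT_NAME='/app/hello' and PATH_INFO='/world'.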
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
def __delattr__(self, name, value):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(s):
return s.title().replace('_', '-')
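# For example: _hkey('content_type') returns 'Content-Type'.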
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', 'Content-Length')),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
                cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
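
            Example (illustrative; ``'s3cret'`` stands in for a real secret key)::

                response.set_cookie('account', {'id': 42}, secret='s3cret')
                # and in a later request handler:
                account = request.get_cookie('account', secret='s3cret')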
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
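# Example (illustrative): a virtual extension namespace, so that
# ``import bottle.ext.foo`` resolves to an installed ``bottle_foo`` module:
#
#     ext = _ImportRedirect('bottle.ext', 'bottle_%s').module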
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
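
        Example::

            >>> d = MultiDict(key='a')
            >>> d['key'] = 'b'
            >>> d['key']
            'b'
            >>> d.getall('key')
            ['a', 'b']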
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
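
            Example::

                >>> d = MultiDict(a='1')
                >>> d.get('a', type=int)
                1
                >>> d.get('b', type=int, default=0)
                0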
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_change_listener', '_fallbacks')
def __init__(self):
self._meta = {}
self._change_listener = []
self._fallbacks = []
def load_module(self, path, squash=True):
""" Load values from a Python module.
            Example module ``config.py`` (note that only uppercase module
            attributes are loaded)::

                DEBUG = True
                SQLITE = {
                    "db": ":memory:"
                }
>>> c = ConfigDict()
>>> c.load_module('config')
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = __import__(path)
obj = dict([(key, getattr(config_obj, key))
for key in dir(config_obj) if key.isupper()])
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
            they are indented deeper than the first line of the value. Comments
            are prefixed by ``#`` or ``;`` and may only appear on otherwise
            empty lines.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
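
            Example (illustrative; assumes a file ``app.ini``)::

                [sqlite]
                db = :memory:

            >>> c = ConfigDict().load_config('app.ini')
            >>> c['sqlite.db']
            ':memory:'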
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def __missing__(self, key):
for fallback in self._fallbacks:
if key in fallback:
value = self[key] = fallback[key]
self.meta_set(key, 'fallback', fallback)
return value
raise KeyError(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def _set_fallback(self, fallback):
self._fallbacks.append(fallback)
@fallback._add_change_listener
def fallback_update(conf, key, value):
if self.meta_get(key, 'fallback') is conf:
self.meta_set(key, 'fallback', None)
dict.__delitem__(self, key)
@self._add_change_listener
def self_update(conf, key, value):
if conf.meta_get(key, 'fallback'):
conf.meta_set(key, 'fallback', None)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
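
        Example (illustrative)::

            res = ResourceManager()
            res.add_path('./resources/')
            fname = res.lookup('config.json')  # absolute path or None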
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
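
            Example (illustrative)::

                upload = request.files.get('datafile')
                upload.save('/tmp/uploads')  # directory + upload.filename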
"""
        if isinstance(destination, basestring):  # Excludes file-like objects
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
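# Example (illustrative):
#
#     @route('/old')
#     def old_page():
#         redirect('/new')  # raises an HTTPResponse with a Location header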
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
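
    Example (illustrative)::

        @route('/static/<filepath:path>')
        def serve_static(filepath):
            return static_file(filepath, root='/srv/www/static')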
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
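# Example (illustrative): parse_auth('Basic dXNlcjpwYXNz') returns
# ('user', 'pass'); a missing, malformed or non-basic header yields None.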
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
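# Example (illustrative): parse_range_header('bytes=0-99,-10', maxlen=1000)
# yields (0, 100) and (990, 1000); end indices are exclusive.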
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
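
    Example::

        >>> _parse_http_header('text/html,text/plain;q=0.9')
        [('text/html', {}), ('text/plain', {'q': '0.9'})]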
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
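# Example (illustrative) of a signed-cookie round trip with a made-up key:
#
#     token = cookie_encode(('user', 42), 's3cret')
#     cookie_is_encoded(token)        # -> True
#     cookie_decode(token, 's3cret')  # -> ('user', 42)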
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
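
        Example::

            >>> path_shift('/a', '/b/c', shift=1)
            ('/a/b', '/c')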
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
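# Example (illustrative):
#
#     @route('/admin')
#     @auth_basic(lambda user, pwd: user == 'admin' and pwd == 's3cret')
#     def admin_page():
#         return 'Hello, admin!'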
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
            self.port = self.srv.server_port  # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
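
        For example::

            mod = load('os.path')        # returns the os.path module
            join = load('os.path:join')  # returns the os.path.join function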
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
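
        Example (illustrative)::

            if __name__ == '__main__':
                run(host='localhost', port=8080, reloader=True)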
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
    _re_tok = _re_inl = r'''((?mx) # verbose and multi-line mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
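    # For example, '{{name}}' compiles to _escape(name) (HTML-escaped output),
    # while '{{!name}}' compiles to _str(name) (raw, unescaped output).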
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
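# Illustrative calls (template names are hypothetical); all three forms reach
# the same render path above:
#     template('hello', name='World')            # lookup by name
#     template('Hello {{name}}!', name='World')  # inline source string
#     template('hello', {'name': 'World'})       # variables as a dict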
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
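# A sketch of typical use (route and template names are hypothetical):
#     @route('/hello/<name>')
#     @view('hello_template')
#     def hello(name):
#         return dict(name=name)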
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creation of the first empty Bottle app is
# deferred until it is needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cli_parse(sys.argv)
def _cli_error(msg):
parser.print_help()
_stderr('\nError: %s\n' % msg)
sys.exit(1)
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
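    # e.g. "-b 0.0.0.0:80" or "-b [::1]:8080"; the rfind() check above keeps
    # bracketed IPv6 hosts intact until the port is split off.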
config = ConfigDict()
for cfile in opt.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error:
_cli_error(str(_e()))
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError):
_cli_error("Unable to parse config file %r: %s" % (cfile, _e()))
for cval in opt.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug,
config=config)
# THE END
|
[] |
[] |
[
"BOTTLE_LOCKFILE",
"BOTTLE_CHILD"
] |
[]
|
["BOTTLE_LOCKFILE", "BOTTLE_CHILD"]
|
python
| 2 | 0 | |
samples/layer.go
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/aliyun/fc-go-sdk"
)
func main() {
client, _ := fc.NewClient(
os.Getenv("ENDPOINT"), "2016-08-15",
os.Getenv("ACCESS_KEY_ID"),
os.Getenv("ACCESS_KEY_SECRET"),
fc.WithTransport(&http.Transport{MaxIdleConnsPerHost: 100}))
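	// A sketch of the environment this client reads (variable names taken from
	// the os.Getenv calls above; the endpoint format is an assumption):
	//   export ENDPOINT=https://<account-id>.<region>.fc.aliyuncs.com
	//   export ACCESS_KEY_ID=...
	//   export ACCESS_KEY_SECRET=...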
	// Layer name
	layerName := "test-layer"
	// Path to the layer package in Zip format
	layerZipFile := "./hello_world.zip"
	// Compatible runtimes for the layer
	compatibleRuntime := []string{"python3", "nodejs12"}
	// 1. Publish a layer version
fmt.Println("Publish layer versions")
	data, err := ioutil.ReadFile(layerZipFile)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
publishLayerVersionOutput, err := client.PublishLayerVersion(fc.NewPublishLayerVersionInput().
WithLayerName(layerName).
WithCode(fc.NewCode().WithZipFile(data)).
WithCompatibleRuntime(compatibleRuntime).
WithDescription("my layer"),
)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("PublishLayerVersion response: %+v \n\n", publishLayerVersionOutput)
	// 2. Get the specified layer version
fmt.Printf("Get the layer of version %d\n", publishLayerVersionOutput.Layer.Version)
getLayerVersionOutput, err := client.GetLayerVersion(
fc.NewGetLayerVersionInput(layerName, publishLayerVersionOutput.Layer.Version))
if err != nil {
fmt.Fprintln(os.Stderr, err)
} else {
fmt.Printf("GetLayerVersion response: %+v \n\n", getLayerVersionOutput.Layer)
}
	// 3. List layers
fmt.Println("List layers")
nextToken := ""
layers := []*fc.Layer{}
for {
listLayersOutput, err := client.ListLayers(
fc.NewListLayersInput().
WithLimit(100).
WithNextToken(nextToken))
if err != nil {
fmt.Fprintln(os.Stderr, err)
break
}
if len(listLayersOutput.Layers) != 0 {
layers = append(layers, listLayersOutput.Layers...)
}
if listLayersOutput.NextToken == nil {
break
}
nextToken = *listLayersOutput.NextToken
}
fmt.Println("ListLayers response:")
for _, layer := range layers {
fmt.Printf("- layerName: %s, layerMaxVersion: %d\n", layer.LayerName, layer.Version)
}
	// 4. List layer versions
fmt.Println("List layer versions")
	// Starting version of the layer; numbering begins at 1 by default
startVersion := int32(1)
fmt.Println("ListLayerVersions response:")
layerVersions := []*fc.Layer{}
for {
listLayerVersionsOutput, err := client.ListLayerVersions(
fc.NewListLayerVersionsInput(layerName, startVersion).
WithLimit(100))
if err != nil {
if err, ok := err.(*fc.ServiceError); ok &&
err.HTTPStatus == http.StatusNotFound {
break
}
fmt.Fprintln(os.Stderr, err)
break
}
if len(listLayerVersionsOutput.Layers) > 0 {
layerVersions = append(layerVersions, listLayerVersionsOutput.Layers...)
}
if listLayerVersionsOutput.NextVersion == nil ||
*listLayerVersionsOutput.NextVersion == 0 {
break
}
startVersion = *listLayerVersionsOutput.NextVersion
}
for _, layer := range layerVersions {
fmt.Printf("- layerName: %s, layerVersion: %d\n", layer.LayerName, layer.Version)
}
	// 5. Delete a layer version
fmt.Printf("Delete the layer of version %d \n", publishLayerVersionOutput.Layer.Version)
deleteLayerVersionOutput, err := client.DeleteLayerVersion(
fc.NewDeleteLayerVersionInput(layerName, publishLayerVersionOutput.Layer.Version))
if err != nil {
fmt.Fprintln(os.Stderr, err)
} else {
fmt.Printf("DeleteLayerVersion response: %+v \n\n", deleteLayerVersionOutput)
}
}
|
[
"\"ENDPOINT\"",
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\""
] |
[] |
[
"ENDPOINT",
"ACCESS_KEY_SECRET",
"ACCESS_KEY_ID"
] |
[]
|
["ENDPOINT", "ACCESS_KEY_SECRET", "ACCESS_KEY_ID"]
|
go
| 3 | 0 | |
FHIR_Tester_backend/sandbox/resource_tester.py
|
import os
import sys
pro_dir = os.getcwd()
sys.path.append(pro_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FHIR_Tester.settings")
from services.genomics_test_generator.fhir_genomics_test_gene import *
from services.request_sender import *
from services.create_resource import *
spec_basepath = 'resources/spec/'
resource_basepath = 'resources/json/'
def iter_all_cases(resource_type, all_cases, url, id_dict, access_token=None):
    #test right cases
    isSuccessful = True
    hint = ''
for case in all_cases['right']:
case = set_reference(case,id_dict)
response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
if isinstance(response, dict) and 'issue' in response and response['issue'][0]['severity'] == 'information':
isSuccessful = isSuccessful and True
else:
if isinstance(response, str):
hint += response
elif isinstance(response, dict):
hint += response['issue'][0]['diagnostics']
isSuccessful = isSuccessful and False
print "%s:Proper %s cases tested:%s" % (resource_type, resource_type, 'success' if isSuccessful else 'fail')
isSuccessfulFalse = True
for case_with_info in all_cases['wrong']:
case = case_with_info['case']
response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
if isinstance(response, dict) and 'issue' in response and response['issue'][0]['severity'] == 'information':
isSuccessfulFalse = isSuccessfulFalse and False
else:
isSuccessfulFalse = isSuccessfulFalse and True
print "%s:Improper %s cases tested:%s" % (resource_type, resource_type, 'success' if isSuccessfulFalse else 'fail')
return isSuccessful and isSuccessfulFalse
def test_a_resource(resource_name, url, access_token=None):
print resource_name
#setup
id_dict = setup(url, access_token)
spec_filename = '%s%s.csv' % (spec_basepath, resource_name)
print spec_filename
all_cases = create_all_test_case4type(spec_filename, resource_name)
if not url.endswith('/'):
url += '/'
    isSuccessful = iter_all_cases(resource_name, all_cases, '%s%s' % (url, resource_name), id_dict, access_token)
print "%s:All %s cases tested:%s" % (resource_name, resource_name, 'success' if isSuccessful else 'fail')
return
def create_all_test_case4type(resource_spec_filename, resource_type):
#load spec
csv_reader = csv.reader(open(resource_spec_filename, 'r'))
detail_dict = trans_csv_to_dict(csv_reader)
del csv_reader
#generate all cases
test_cases = create_element_test_cases(detail_dict)
right_cases, wrong_cases = create_orthogonal_test_cases(test_cases)
#wrap test cases
all_cases = {}
all_cases['right'] = []
all_cases['wrong'] = []
for case in right_cases:
case['resourceType'] = resource_type
all_cases['right'].append(case)
for case in wrong_cases:
case['case']['resourceType'] = resource_type
all_cases['wrong'].append(case)
#return all cases
return all_cases
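# A sketch of the returned shape (inferred from the wrapping loops above):
#   {'right': [{'resourceType': <resource_type>, ...}, ...],
#    'wrong': [{'case': {'resourceType': <resource_type>, ...}, ...}, ...]}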
def ana_pre_creation_result(raw_info):
processed_info = {}
for key in raw_info:
if raw_info[key] and 'issue' in raw_info[key]:
if raw_info[key]['issue'][0]['severity'] == 'information':
processed_info[key] = True
else:
processed_info[key] = False
return processed_info
def setup(url, access_token=None):
create_res, id_dict = create_pre_resources(url, 'resources', access_token)
pre_resource_result = ana_pre_creation_result(create_res)
# print pre_resource_result
status = True
for key in pre_resource_result:
status = status and pre_resource_result[key]
print "Setup:Setup:%s" % "success" if status else "fail"
return id_dict
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
services/drds/submit_rollback_sharding_key_modify.go
|
package drds
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/responses"
)
// SubmitRollbackShardingKeyModify invokes the drds.SubmitRollbackShardingKeyModify API synchronously
// api document: https://help.aliyun.com/api/drds/submitrollbackshardingkeymodify.html
func (client *Client) SubmitRollbackShardingKeyModify(request *SubmitRollbackShardingKeyModifyRequest) (response *SubmitRollbackShardingKeyModifyResponse, err error) {
response = CreateSubmitRollbackShardingKeyModifyResponse()
err = client.DoAction(request, response)
return
}
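// A usage sketch (field names per the request struct below; values are
// hypothetical):
//   request := drds.CreateSubmitRollbackShardingKeyModifyRequest()
//   request.DrdsInstanceId = "drds-xxxxxxxx"
//   request.DbName = "my_db"
//   request.TaskId = "12345"
//   response, err := client.SubmitRollbackShardingKeyModify(request)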
// SubmitRollbackShardingKeyModifyWithChan invokes the drds.SubmitRollbackShardingKeyModify API asynchronously
// api document: https://help.aliyun.com/api/drds/submitrollbackshardingkeymodify.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SubmitRollbackShardingKeyModifyWithChan(request *SubmitRollbackShardingKeyModifyRequest) (<-chan *SubmitRollbackShardingKeyModifyResponse, <-chan error) {
responseChan := make(chan *SubmitRollbackShardingKeyModifyResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.SubmitRollbackShardingKeyModify(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// SubmitRollbackShardingKeyModifyWithCallback invokes the drds.SubmitRollbackShardingKeyModify API asynchronously
// api document: https://help.aliyun.com/api/drds/submitrollbackshardingkeymodify.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SubmitRollbackShardingKeyModifyWithCallback(request *SubmitRollbackShardingKeyModifyRequest, callback func(response *SubmitRollbackShardingKeyModifyResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *SubmitRollbackShardingKeyModifyResponse
var err error
defer close(result)
response, err = client.SubmitRollbackShardingKeyModify(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// SubmitRollbackShardingKeyModifyRequest is the request struct for api SubmitRollbackShardingKeyModify
type SubmitRollbackShardingKeyModifyRequest struct {
*requests.RpcRequest
DrdsInstanceId string `position:"Query" name:"DrdsInstanceId"`
DbName string `position:"Query" name:"DbName"`
TaskId string `position:"Query" name:"TaskId"`
}
// SubmitRollbackShardingKeyModifyResponse is the response struct for api SubmitRollbackShardingKeyModify
type SubmitRollbackShardingKeyModifyResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Data bool `json:"Data" xml:"Data"`
}
// CreateSubmitRollbackShardingKeyModifyRequest creates a request to invoke SubmitRollbackShardingKeyModify API
func CreateSubmitRollbackShardingKeyModifyRequest() (request *SubmitRollbackShardingKeyModifyRequest) {
request = &SubmitRollbackShardingKeyModifyRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Drds", "2019-01-23", "SubmitRollbackShardingKeyModify", "Drds", "openAPI")
return
}
// CreateSubmitRollbackShardingKeyModifyResponse creates a response to parse from SubmitRollbackShardingKeyModify response
func CreateSubmitRollbackShardingKeyModifyResponse() (response *SubmitRollbackShardingKeyModifyResponse) {
response = &SubmitRollbackShardingKeyModifyResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cmd/otelclicarrier.go
|
package cmd
import (
"bytes"
"context"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"go.opentelemetry.io/otel"
)
var envTp string // global state
var checkTracecarrierRe *regexp.Regexp
// OtelCliCarrier implements the OpenTelemetry TextMapCarrier interface that
// supports only one key/value for the traceparent and does nothing else
type OtelCliCarrier struct{}
func init() {
// only anchored at the front because traceparents can include more things
// per the standard but only the first 4 are required for our uses
checkTracecarrierRe = regexp.MustCompile("^[[:xdigit:]]{2}-[[:xdigit:]]{32}-[[:xdigit:]]{16}-[[:xdigit:]]{2}")
}
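// A W3C traceparent has the form version-traceid-spanid-flags, e.g.
// 00-9765b2f71c68b04dc0ad2a4d73027d6f-1881444346b6296e-01; the regexp above
// anchors on exactly those four dash-separated hex fields.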
func NewOtelCliCarrier() OtelCliCarrier {
return OtelCliCarrier{}
}
// Get returns the traceparent string if key is "traceparent" otherwise nothing
func (ec OtelCliCarrier) Get(key string) string {
if key == "traceparent" {
return envTp
} else {
return ""
}
}
// Set sets the global traceparent if key is "traceparent" otherwise nothing
func (ec OtelCliCarrier) Set(key string, value string) {
if key == "traceparent" {
envTp = value
}
}
// Keys returns a list of strings containing just "traceparent"
func (ec OtelCliCarrier) Keys() []string {
return []string{"traceparent"}
}
// Clear sets the traceparent to empty string. Mainly for testing.
func (ec OtelCliCarrier) Clear() {
envTp = ""
}
// loadTraceparent checks the environment first for TRACEPARENT then if filename
// isn't empty, it will read that file and look for a bare traceparent in that
// file.
func loadTraceparent(ctx context.Context, filename string) context.Context {
ctx = loadTraceparentFromEnv(ctx)
if filename != "" {
ctx = loadTraceparentFromFile(ctx, filename)
}
if traceparentRequired {
tp := getTraceparent(ctx) // get the text representation in the context
if len(tp) > 0 && checkTracecarrierRe.MatchString(tp) {
parts := strings.Split(tp, "-") // e.g. 00-9765b2f71c68b04dc0ad2a4d73027d6f-1881444346b6296e-01
// return from here if everything looks ok, otherwise fall through to the log.Fatal
if len(parts) > 3 && parts[1] != "00000000000000000000000000000000" && parts[2] != "0000000000000000" {
return ctx
}
}
log.Fatalf("failed to find a valid traceparent carrier in either environment for file '%s' while it's required by --tp-required", filename)
}
return ctx
}
// loadTraceparentFromFile reads a traceparent from filename and returns a
// context with the traceparent set. The format for the file as written is
// just a bare traceparent string. Whitespace, "export " and "TRACEPARENT=" are
// stripped automatically so the file can also be a valid shell snippet.
func loadTraceparentFromFile(ctx context.Context, filename string) context.Context {
file, err := os.Open(filename)
if err != nil {
// only fatal when the tp carrier file is required explicitly, otherwise
// just silently return the unmodified context
if traceparentRequired {
log.Fatalf("could not open file '%s' for read: %s", filename, err)
} else {
return ctx
}
}
data, err := ioutil.ReadAll(file)
if err != nil {
log.Fatalf("failure while reading from file '%s': %s", filename, err)
}
tp := bytes.TrimSpace(data)
if len(tp) == 0 {
return ctx
}
// also accept 'export TRACEPARENT=' and 'TRACEPARENT='
tp = bytes.TrimPrefix(tp, []byte("export "))
tp = bytes.TrimPrefix(tp, []byte("TRACEPARENT="))
if !checkTracecarrierRe.Match(tp) {
		// TODO: consider whether this should be a soft failure
log.Fatalf("file '%s' was read but does not contain a valid traceparent", filename)
}
carrier := NewOtelCliCarrier()
carrier.Set("traceparent", string(tp))
prop := otel.GetTextMapPropagator()
return prop.Extract(ctx, carrier)
}
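// All of the following file contents are accepted and equivalent, since
// whitespace, "export " and "TRACEPARENT=" are stripped above:
//   00-9765b2f71c68b04dc0ad2a4d73027d6f-1881444346b6296e-01
//   TRACEPARENT=00-9765b2f71c68b04dc0ad2a4d73027d6f-1881444346b6296e-01
//   export TRACEPARENT=00-9765b2f71c68b04dc0ad2a4d73027d6f-1881444346b6296e-01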
// saveTraceparentToFile takes a context and filename and writes the tp from
// that context into the specified file.
func saveTraceparentToFile(ctx context.Context, filename string) {
if filename == "" {
return
}
tp := getTraceparent(ctx)
err := ioutil.WriteFile(filename, []byte(tp), 0600)
if err != nil {
log.Fatalf("failure while writing to file '%s': %s", filename, err)
}
}
// loadTraceparentFromEnv loads the traceparent from the environment variable
// TRACEPARENT and sets it in the returned Go context.
func loadTraceparentFromEnv(ctx context.Context) context.Context {
// don't load the envvar when --tp-ignore-env is set
if traceparentIgnoreEnv {
return ctx
}
tp := os.Getenv("TRACEPARENT")
if tp == "" {
return ctx
}
// https://github.com/open-telemetry/opentelemetry-go/blob/main/propagation/trace_context.go#L31
// the 'traceparent' key is a private constant in the otel library so this
// is using an internal detail but it's probably fine
carrier := NewOtelCliCarrier()
carrier.Set("traceparent", tp)
prop := otel.GetTextMapPropagator()
return prop.Extract(ctx, carrier)
}
// getTraceparent returns the traceparent string from the context passed in
// and should reflect the most recent state, e.g. to print out
func getTraceparent(ctx context.Context) string {
prop := otel.GetTextMapPropagator()
carrier := NewOtelCliCarrier()
prop.Inject(ctx, carrier)
return carrier.Get("traceparent")
}
|
[
"\"TRACEPARENT\""
] |
[] |
[
"TRACEPARENT"
] |
[]
|
["TRACEPARENT"]
|
go
| 1 | 0 | |
templates/api-template-python/components/logger.py
|
from datetime import datetime
from inspect import currentframe, getframeinfo
import os
'''
USAGE
from components.logger import Logger
logger = Logger(event, context)
# Examples
logger.error('Runtime errors or unexpected conditions.');
logger.warn('Runtime situations that are undesirable or unexpected, but not necessarily "wrong".');
logger.info('Interesting runtime events (Eg. connection established, data fetched etc).');
logger.verbose('Generally speaking, most lines logged by your application should be written as verbose.');
logger.debug('Detailed information on the flow through the system');
'''
class Logger(object):
# init log_level, default config, context_details
def __init__(self, event={}, context={}):
self.log_levels = {
'error': 4,
'warn': 3,
'info': 2,
'verbose': 1,
'debug': 0
}
self.config = {
'cur_loglevel': 'info',
'context_details': '',
'show_timestamp': True,
'show_linenumber': True
}
# self.logger = logging.getLogger()
# self.logger.set_level(logging.INFO)
self.set_level()
if context is not None:
aws_request_id = None
if isinstance(context, dict):
aws_request_id = context.get('aws_request_id')
else:
aws_request_id = context.aws_request_id
self.set_context_details('', aws_request_id)
fname = self.set_context_details.__code__.co_filename
        self._srcfile = os.path.normcase(fname)
# TODO: For future use, may be to add context information
def set_context_details(self, label, value):
# Timestamp and requestID are prepended in cloudwatch log by default;
# If any other details are required it can be done here.
        if value is not None and value != '':
            rd = self.config.get('context_details')
            if label is not None and label != '':
label = label + ': '
rd = rd + str(label) + str(value) + ' '
self.config['context_details'] = rd
else:
self.config['context_details'] = ''
def print_file(self, fil):
print (self.get_linenumber())
def get_linenumber(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file): 0"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == self._srcfile:
f = f.f_back
continue
            rv = str(co.co_filename) + ":" + str(f.f_lineno)
break
return rv
# set current log_level
# Only logs which are above the cur_loglevel will be logged;
def set_level(self, level=None):
# LOG_LEVEL is 'info' by default
        if level is not None and level in self.log_levels:
# If LOG_LEVEL if explicitly specified , set it as the cur_loglevel
self.config['cur_loglevel'] = level
return level
else:
# Get LOG_LEVEL from the environment variables (if defined)
            try:
                level = os.environ.get('LOG_LEVEL')
except Exception as e:
self.error('error trying to access LOG_LEVEL')
raise e
            if level is not None and level in self.log_levels:
self.config['cur_loglevel'] = level
return level
return
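    # Level resolution, illustrated: an explicit set_level('warn') wins;
    # otherwise the LOG_LEVEL environment variable (e.g. LOG_LEVEL=debug) is
    # used; failing both, the 'info' default from __init__ stays in effect.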
def log(self, level, message):
# format message as required.
if self.config.get('show_timestamp'):
# timestamp = str(datetime.datetime.now()) + " "
timestamp = str(datetime.utcnow().strftime(
'%Y-%m-%dT%H:%M:%S.%f')[:-3] + "Z ")
else:
timestamp = ""
if self.config.get('show_linenumber') is True:
linenumber = self.get_linenumber() + " "
else:
linenumber = ""
cur_loglevel = self.config.get('cur_loglevel')
if self.log_levels[level] >= self.log_levels[cur_loglevel]:
            output_message = timestamp + \
                self.config.get('context_details') + \
                str(level).upper() + "\t" + \
                linenumber + \
                message
print(output_message)
return
def error(self, message):
self.log('error', message)
def warn(self, message):
self.log('warn', message)
def info(self, message):
self.log('info', message)
def verbose(self, message):
self.log('verbose', message)
def debug(self, message):
self.log('debug', message)
|
[] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
python
| 1 | 0 | |
testutils/source.go
|
package testutils
import "github.com/securego/gosec"
// CodeSample encapsulates a snippet of source code that compiles, and how many errors should be detected
type CodeSample struct {
Code []string
Errors int
Config gosec.Config
}
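// Each sample pairs source code with the exact number of findings gosec is
// expected to report for it: for example, the first G101 snippet below must
// yield one hardcoded-credentials issue, while the entropy-check example
// must yield none.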
var (
// SampleCodeG101 code snippets for hardcoded credentials
SampleCodeG101 = []CodeSample{{[]string{`
package main
import "fmt"
func main() {
username := "admin"
password := "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Entropy check should not report this error by default
package main
import "fmt"
func main() {
username := "admin"
password := "secret"
fmt.Println("Doing something with: ", username, password)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import "fmt"
var password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
func main() {
username := "admin"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import "fmt"
const password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
func main() {
username := "admin"
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import "fmt"
const (
username = "user"
password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
)
func main() {
fmt.Println("Doing something with: ", username, password)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
var password string
func init() {
password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
const (
ATNStateSomethingElse = 1
ATNStateTokenStart = 42
)
func main() {
println(ATNStateTokenStart)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
const (
ATNStateTokenStart = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef"
)
func main() {
println(ATNStateTokenStart)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG102 code snippets for network binding
SampleCodeG102 = []CodeSample{
// Bind to all networks explicitly
{[]string{`
package main
import (
"log"
"net"
)
func main() {
l, err := net.Listen("tcp", "0.0.0.0:2000")
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks implicitly (default if host omitted)
{[]string{`
package main
import (
"log"
"net"
)
func main() {
l, err := net.Listen("tcp", ":2000")
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks indirectly through a parsing function
{[]string{`
package main
import (
"log"
"net"
)
func parseListenAddr(listenAddr string) (network string, addr string) {
return "", ""
}
func main() {
addr := ":2000"
l, err := net.Listen(parseListenAddr(addr))
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
// Bind to all networks indirectly through a parsing function
{[]string{`
package main
import (
"log"
"net"
)
const addr = ":2000"
func parseListenAddr(listenAddr string) (network string, addr string) {
return "", ""
}
func main() {
l, err := net.Listen(parseListenAddr(addr))
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
{[]string{`
package main
import (
"log"
"net"
)
const addr = "0.0.0.0:2000"
func main() {
l, err := net.Listen("tcp", addr)
if err != nil {
log.Fatal(err)
}
defer l.Close()
}`}, 1, gosec.NewConfig()},
}
// SampleCodeG103 find instances of unsafe blocks for auditing purposes
SampleCodeG103 = []CodeSample{
{[]string{`
package main
import (
"fmt"
"unsafe"
)
type Fake struct{}
func (Fake) Good() {}
func main() {
unsafeM := Fake{}
unsafeM.Good()
intArray := [...]int{1, 2}
fmt.Printf("\nintArray: %v\n", intArray)
intPtr := &intArray[0]
fmt.Printf("\nintPtr=%p, *intPtr=%d.\n", intPtr, *intPtr)
addressHolder := uintptr(unsafe.Pointer(intPtr)) + unsafe.Sizeof(intArray[0])
intPtr = (*int)(unsafe.Pointer(addressHolder))
fmt.Printf("\nintPtr=%p, *intPtr=%d.\n\n", intPtr, *intPtr)
}`}, 3, gosec.NewConfig()}}
// SampleCodeG104 finds errors that aren't being handled
SampleCodeG104 = []CodeSample{
{[]string{`
package main
import "fmt"
func test() (int,error) {
return 0, nil
}
func main() {
v, _ := test()
fmt.Println(v)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() error {
return fmt.Errorf("This is an error")
}
func b() {
fmt.Println("b")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func c() string {
return fmt.Sprintf("This isn't anything")
}
func main() {
_ = a()
a()
b()
c()
}`}, 2, gosec.NewConfig()}, {[]string{`
package main
import "fmt"
func test() error {
return nil
}
func main() {
e := test()
fmt.Println(e)
}`}, 0, gosec.NewConfig()}, {[]string{`
// +build go1.10
package main
import "strings"
func main() {
var buf strings.Builder
_, err := buf.WriteString("test string")
if err != nil {
panic(err)
}
}`, `
package main
func dummy(){}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"bytes"
)
type a struct {
buf *bytes.Buffer
}
func main() {
a := &a{
buf: new(bytes.Buffer),
}
a.buf.Write([]byte{0})
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() {
fmt.Println("a")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func main() {
a()
}`}, 0, gosec.Config{"G104": map[string]interface{}{"ioutil": []interface{}{"WriteFile"}}}}, {[]string{`
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
func createBuffer() *bytes.Buffer {
return new(bytes.Buffer)
}
func main() {
new(bytes.Buffer).WriteString("*bytes.Buffer")
fmt.Fprintln(os.Stderr, "fmt")
new(strings.Builder).WriteString("*strings.Builder")
_, pw := io.Pipe()
pw.CloseWithError(io.EOF)
createBuffer().WriteString("*bytes.Buffer")
b := createBuffer()
b.WriteString("*bytes.Buffer")
}`}, 0, gosec.NewConfig()}} // it shouldn't return any errors because all method calls are whitelisted by default
// SampleCodeG104Audit finds errors that aren't being handled in audit mode
SampleCodeG104Audit = []CodeSample{
{[]string{`
package main
import "fmt"
func test() (int,error) {
return 0, nil
}
func main() {
v, _ := test()
fmt.Println(v)
}`}, 1, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
package main
import (
"io/ioutil"
"os"
"fmt"
)
func a() error {
return fmt.Errorf("This is an error")
}
func b() {
fmt.Println("b")
ioutil.WriteFile("foo.txt", []byte("bar"), os.ModeExclusive)
}
func c() string {
return fmt.Sprintf("This isn't anything")
}
func main() {
_ = a()
a()
b()
c()
}`}, 3, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
package main
import "fmt"
func test() error {
return nil
}
func main() {
e := test()
fmt.Println(e)
}`}, 0, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}, {[]string{`
// +build go1.10
package main
import "strings"
func main() {
var buf strings.Builder
_, err := buf.WriteString("test string")
if err != nil {
panic(err)
}
}`, `
package main
func dummy(){}
`}, 0, gosec.Config{gosec.Globals: map[gosec.GlobalOption]string{gosec.Audit: "enabled"}}}}
// SampleCodeG106 - ssh InsecureIgnoreHostKey
SampleCodeG106 = []CodeSample{{[]string{`
package main
import (
"golang.org/x/crypto/ssh"
)
func main() {
_ = ssh.InsecureIgnoreHostKey()
}`}, 1, gosec.NewConfig()}}
// SampleCodeG107 - SSRF via http requests with variable url
SampleCodeG107 = []CodeSample{{[]string{`
// Input from stdin is considered insecure
package main
import (
"net/http"
"io/ioutil"
"fmt"
"os"
"bufio"
)
func main() {
in := bufio.NewReader(os.Stdin)
url, err := in.ReadString('\n')
if err != nil {
panic(err)
}
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// A variable defined at package level can be changed at any time
// regardless of its initial value
package main
import (
"fmt"
"io/ioutil"
"net/http"
)
var url string = "https://www.google.com"
func main() {
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Environment variables are not considered a secure source
package main
import (
"net/http"
"io/ioutil"
"fmt"
"os"
)
func main() {
url := os.Getenv("tainted_url")
resp, err := http.Get(url)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Printf("%s", body)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Constant variables or hard-coded strings are secure
package main
import (
"fmt"
"net/http"
)
const url = "http://127.0.0.1"
func main() {
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
var url string = "http://127.0.0.1"
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
url := "http://127.0.0.1"
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// A variable at function scope which is initialized to
// a constant string is secure (e.g. cannot be changed concurrently)
package main
import (
"fmt"
"net/http"
)
func main() {
url1 := "test"
var url2 string = "http://127.0.0.1"
url2 = url1
resp, err := http.Get(url2)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 0, gosec.NewConfig()}, {[]string{`
// An exported variable declared at package scope is not secure
// because it can be changed at any time
package main
import (
"fmt"
"net/http"
)
var Url string
func main() {
resp, err := http.Get(Url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}`}, 1, gosec.NewConfig()}, {[]string{`
// A URL provided as a function argument is not secure
package main
import (
"fmt"
"net/http"
)
func get(url string) {
resp, err := http.Get(url)
if err != nil {
fmt.Println(err)
}
fmt.Println(resp.Status)
}
func main() {
url := "http://127.0.0.1"
get(url)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG108 - pprof endpoint automatically exposed
SampleCodeG108 = []CodeSample{{[]string{`
package main
import (
"fmt"
"log"
"net/http"
_ "net/http/pprof"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
})
log.Fatal(http.ListenAndServe(":8080", nil))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"log"
"net/http"
"net/http/pprof"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello World!")
})
log.Fatal(http.ListenAndServe(":8080", nil))
}`}, 0, gosec.NewConfig()}}
// SampleCodeG109 - Potential Integer OverFlow
SampleCodeG109 = []CodeSample{
{[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
value := int32(bigValue)
fmt.Println(value)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("32768")
if err != nil {
panic(err)
}
if int16(bigValue) < 0 {
fmt.Println(bigValue)
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
fmt.Println(bigValue)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
bigValue, err := strconv.Atoi("2147483648")
if err != nil {
panic(err)
}
fmt.Println(bigValue)
test()
}
func test() {
bigValue := 30
value := int32(bigValue)
fmt.Println(value)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"strconv"
)
func main() {
value := 10
if value == 10 {
value, _ := strconv.Atoi("2147483648")
fmt.Println(value)
}
v := int32(value)
fmt.Println(v)
}`}, 0, gosec.NewConfig()}}
// SampleCodeG110 - potential DoS vulnerability via decompression bomb
SampleCodeG110 = []CodeSample{
{[]string{`
package main
import (
"bytes"
"compress/zlib"
"io"
"os"
)
func main() {
buff := []byte{120, 156, 202, 72, 205, 201, 201, 215, 81, 40, 207,
47, 202, 73, 225, 2, 4, 0, 0, 255, 255, 33, 231, 4, 147}
b := bytes.NewReader(buff)
r, err := zlib.NewReader(b)
if err != nil {
panic(err)
}
io.Copy(os.Stdout, r)
r.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"archive/zip"
"io"
"os"
"strconv"
)
func main() {
r, err := zip.OpenReader("tmp.zip")
if err != nil {
panic(err)
}
defer r.Close()
for i, f := range r.File {
out, err := os.OpenFile("output" + strconv.Itoa(i), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
panic(err)
}
rc, err := f.Open()
if err != nil {
panic(err)
}
_, err = io.Copy(out, rc)
out.Close()
rc.Close()
if err != nil {
panic(err)
}
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"io"
"os"
)
func main() {
s, err := os.Open("src")
if err != nil {
panic(err)
}
defer s.Close()
d, err := os.Create("dst")
if err != nil {
panic(err)
}
defer d.Close()
_, err = io.Copy(d, s)
if err != nil {
panic(err)
}
}`}, 0, gosec.NewConfig()}}
// SampleCodeG201 - SQL injection via format string
SampleCodeG201 = []CodeSample{
{[]string{`
// Format string without proper quoting
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where name = '%s'", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// Format string false positive, safe string spec.
package main
import (
"database/sql"
"fmt"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM foo where id = %d", os.Args[1])
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// Format string false positive
package main
import (
"database/sql"
)
const staticQuery = "SELECT * FROM foo WHERE age < 32"
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// Format string false positive, quoted formatter argument.
package main
import (
"database/sql"
"fmt"
"os"
"github.com/lib/pq"
)
func main(){
db, err := sql.Open("postgres", "localhost")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM %s where id = 1", pq.QuoteIdentifier(os.Args[1]))
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
// false positive
package main
import (
"database/sql"
"fmt"
)
const Table = "foo"
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
q := fmt.Sprintf("SELECT * FROM %s where id = 1", Table)
rows, err := db.Query(q)
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
)
func main(){
fmt.Sprintln()
}`}, 0, gosec.NewConfig()}}
// SampleCodeG202 - SQL query string building via string concatenation
SampleCodeG202 = []CodeSample{
{[]string{`
package main
import (
"database/sql"
"os"
)
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("SELECT * FROM foo WHERE name = " + os.Args[1])
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 1, gosec.NewConfig()}, {[]string{`
// false positive
package main
import (
"database/sql"
)
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery + "32")
if err != nil {
panic(err)
}
defer rows.Close()
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"database/sql"
)
const age = "32"
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query(staticQuery + age)
if err != nil {
panic(err)
}
defer rows.Close()
}
`}, 0, gosec.NewConfig()}, {[]string{`
package main
const gender = "M"
`, `
package main
import (
"database/sql"
)
const age = "32"
var staticQuery = "SELECT * FROM foo WHERE age < "
func main(){
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
panic(err)
}
rows, err := db.Query("SELECT * FROM foo WHERE gender = " + gender)
if err != nil {
panic(err)
}
defer rows.Close()
}
`}, 0, gosec.NewConfig()}}
// SampleCodeG203 - Template checks
SampleCodeG203 = []CodeSample{
{[]string{`
// We assume that hardcoded template strings are safe as the programmer would
// need to be explicitly shooting themselves in the foot (as below)
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.HTML("<script>alert(1)</script>"),
}
t.Execute(os.Stdout, v)
}`}, 0, gosec.NewConfig()}, {[]string{
`
// Using a variable to initialize could potentially be dangerous. Under the
// current model this will likely produce some false positives.
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.HTML(a),
}
t.Execute(os.Stdout, v)
}`}, 1, gosec.NewConfig()}, {[]string{
`
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.JS(a),
}
t.Execute(os.Stdout, v)
}`}, 1, gosec.NewConfig()}, {[]string{
`
package main
import (
"html/template"
"os"
)
const tmpl = ""
func main() {
a := "something from another place"
t := template.Must(template.New("ex").Parse(tmpl))
v := map[string]interface{}{
"Title": "Test <b>World</b>",
"Body": template.URL(a),
}
t.Execute(os.Stdout, v)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG204 - Subprocess auditing
SampleCodeG204 = []CodeSample{{[]string{`
package main
import (
"log"
"os/exec"
"context"
)
func main() {
err := exec.CommandContext(context.Background(), "git", "rev-parse", "--show-toplevel").Run()
if err != nil {
log.Fatal(err)
}
log.Printf("Command finished with error: %v", err)
}`}, 0, gosec.NewConfig()}, {[]string{`
// Calling any function which starts a new process using
// command line arguments as its arguments is considered dangerous
package main
import (
"context"
"log"
"os"
"os/exec"
)
func main() {
err := exec.CommandContext(context.Background(), os.Args[0], "5").Run()
if err != nil {
log.Fatal(err)
}
log.Printf("Command finished with error: %v", err)
}`}, 1, gosec.NewConfig()}, {[]string{`
// Initializing a local variable using an environment
// variable is considered dangerous user input
package main
import (
"log"
"os"
"os/exec"
)
func main() {
run := "sleep" + os.Getenv("SOMETHING")
cmd := exec.Command(run, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
log.Printf("Command finished with error: %v", err)
}`}, 1, gosec.NewConfig()}, {[]string{`
// gosec doesn't have enough context to decide that the
// command argument of the RunCmd function is a hardcoded string,
// which is why it's better to warn the user so they can audit it
package main
import (
"log"
"os/exec"
)
func RunCmd(command string) {
cmd := exec.Command(command, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
}
func main() {
RunCmd("sleep")
}`}, 1, gosec.NewConfig()}, {[]string{`
// syscall.Exec function called with hardcoded arguments
// shouldn't be considered a command injection
package main
import (
"fmt"
"syscall"
)
func main() {
err := syscall.Exec("/bin/cat", []string{"/etc/passwd"}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}`}, 0, gosec.NewConfig()},
{[]string{`
package main
import (
"fmt"
"syscall"
)
func RunCmd(command string) {
_, err := syscall.ForkExec(command, []string{}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func main() {
RunCmd("sleep")
}`}, 1, gosec.NewConfig(),
},
{[]string{`
package main
import (
"fmt"
"syscall"
)
func RunCmd(command string) {
_, err := syscall.StartProcess(command, []string{}, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func main() {
RunCmd("sleep")
}`}, 1, gosec.NewConfig(),
},
{[]string{`
// starting a process with a variable as an argument
// is not considered dangerous even when it is not a constant,
// because the variable holds a hardcoded value
package main
import (
"log"
"os/exec"
)
func main() {
run := "sleep"
cmd := exec.Command(run, "5")
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
log.Printf("Waiting for command to finish...")
err = cmd.Wait()
log.Printf("Command finished with error: %v", err)
}`}, 0, gosec.NewConfig()}}
// SampleCodeG301 - mkdir permission check
SampleCodeG301 = []CodeSample{{[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Mkdir("/tmp/mydir", 0777)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.MkdirAll("/tmp/mydir", 0777)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Mkdir("/tmp/mydir", 0600)
if err != nil {
fmt.Println("Error when creating a directory!")
return
}
}`}, 0, gosec.NewConfig()}}
// SampleCodeG302 - file create / chmod permissions check
SampleCodeG302 = []CodeSample{{[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Chmod("/tmp/somefile", 0777)
if err != nil {
fmt.Println("Error when changing file permissions!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
_, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
fmt.Println("Error opening a file!")
return
}
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
err := os.Chmod("/tmp/mydir", 0400)
if err != nil {
fmt.Println("Error")
return
}
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"os"
)
func main() {
_, err := os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
fmt.Println("Error opening a file!")
return
}
}
`}, 0, gosec.NewConfig()}}
// SampleCodeG303 - bad tempfile permissions & hardcoded shared path
SampleCodeG303 = []CodeSample{{[]string{`
package samples
import (
"fmt"
"io/ioutil"
)
func main() {
err := ioutil.WriteFile("/tmp/demo2", []byte("This is some data"), 0644)
if err != nil {
fmt.Println("Error while writing!")
}
}`}, 1, gosec.NewConfig()}}
// SampleCodeG304 - potential file inclusion vulnerability
SampleCodeG304 = []CodeSample{{[]string{`
package main
import (
"os"
"io/ioutil"
"log"
)
func main() {
f := os.Getenv("tainted_file")
body, err := ioutil.ReadFile(f)
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
title := r.URL.Query().Get("title")
f, err := os.Open(title)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
body := make([]byte, 5)
if _, err = f.Read(body); err != nil {
fmt.Printf("Error: %v\n", err)
}
fmt.Fprintf(w, "%s", body)
})
log.Fatal(http.ListenAndServe(":3000", nil))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"log"
"os"
"io/ioutil"
)
func main() {
f2 := os.Getenv("tainted_file2")
body, err := ioutil.ReadFile("/tmp/" + f2)
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
)
func main() {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Please enter file to read: ")
file, _ := reader.ReadString('\n')
file = file[:len(file)-1]
f, err := os.Open(filepath.Join("/tmp/service/", file))
if err != nil {
fmt.Printf("Error: %v\n", err)
}
contents := make([]byte, 15)
if _, err = f.Read(contents); err != nil {
fmt.Printf("Error: %v\n", err)
}
fmt.Println(string(contents))
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"log"
"os"
"io/ioutil"
"path/filepath"
)
func main() {
dir := os.Getenv("server_root")
f3 := os.Getenv("tainted_file3")
// edge case where both a binary expression and file Join are used.
body, err := ioutil.ReadFile(filepath.Join("/var/"+dir, f3))
if err != nil {
log.Printf("Error: %v\n", err)
}
log.Print(body)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG305 - File path traversal when extracting zip archives
SampleCodeG305 = []CodeSample{{[]string{`
package unzip
import (
"archive/zip"
"io"
"os"
"path/filepath"
)
func unzip(archive, target string) error {
reader, err := zip.OpenReader(archive)
if err != nil {
return err
}
if err := os.MkdirAll(target, 0750); err != nil {
return err
}
for _, file := range reader.File {
path := filepath.Join(target, file.Name)
if file.FileInfo().IsDir() {
os.MkdirAll(path, file.Mode()) // #nosec
continue
}
fileReader, err := file.Open()
if err != nil {
return err
}
defer fileReader.Close()
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
defer targetFile.Close()
if _, err := io.Copy(targetFile, fileReader); err != nil {
return err
}
}
return nil
}`}, 1, gosec.NewConfig()}, {[]string{`
package unzip
import (
"archive/zip"
"io"
"os"
"path/filepath"
)
func unzip(archive, target string) error {
reader, err := zip.OpenReader(archive)
if err != nil {
return err
}
if err := os.MkdirAll(target, 0750); err != nil {
return err
}
for _, file := range reader.File {
archiveFile := file.Name
path := filepath.Join(target, archiveFile)
if file.FileInfo().IsDir() {
os.MkdirAll(path, file.Mode()) // #nosec
continue
}
fileReader, err := file.Open()
if err != nil {
return err
}
defer fileReader.Close()
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
defer targetFile.Close()
if _, err := io.Copy(targetFile, fileReader); err != nil {
return err
}
}
return nil
}`}, 1, gosec.NewConfig()}}
// SampleCodeG306 - Poor permissions for WriteFile
SampleCodeG306 = []CodeSample{
{[]string{`package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func main() {
d1 := []byte("hello\ngo\n")
err := ioutil.WriteFile("/tmp/dat1", d1, 0744)
check(err)
allowed := ioutil.WriteFile("/tmp/dat1", d1, 0600)
check(allowed)
f, err := os.Create("/tmp/dat2")
check(err)
defer f.Close()
d2 := []byte{115, 111, 109, 101, 10}
n2, err := f.Write(d2)
defer check(err)
fmt.Printf("wrote %d bytes\n", n2)
n3, err := f.WriteString("writes\n")
fmt.Printf("wrote %d bytes\n", n3)
f.Sync()
w := bufio.NewWriter(f)
n4, err := w.WriteString("buffered\n")
fmt.Printf("wrote %d bytes\n", n4)
w.Flush()
}`}, 1, gosec.NewConfig()}}
// SampleCodeG307 - Unsafe defer of os.Close
SampleCodeG307 = []CodeSample{
{[]string{`package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
func check(e error) {
if e != nil {
panic(e)
}
}
func main() {
d1 := []byte("hello\ngo\n")
err := ioutil.WriteFile("/tmp/dat1", d1, 0744)
check(err)
allowed := ioutil.WriteFile("/tmp/dat1", d1, 0600)
check(allowed)
f, err := os.Create("/tmp/dat2")
check(err)
defer f.Close()
d2 := []byte{115, 111, 109, 101, 10}
n2, err := f.Write(d2)
defer check(err)
fmt.Printf("wrote %d bytes\n", n2)
n3, err := f.WriteString("writes\n")
fmt.Printf("wrote %d bytes\n", n3)
f.Sync()
w := bufio.NewWriter(f)
n4, err := w.WriteString("buffered\n")
fmt.Printf("wrote %d bytes\n", n4)
w.Flush()
}`}, 1, gosec.NewConfig()}}
// SampleCodeG401 - Use of weak crypto MD5
SampleCodeG401 = []CodeSample{
{[]string{`
package main
import (
"crypto/md5"
"fmt"
"io"
"log"
"os"
)
func main() {
f, err := os.Open("file.txt")
if err != nil {
log.Fatal(err)
}
defer f.Close()
defer func() {
err := f.Close()
if err != nil {
log.Printf("error closing the file: %s", err)
}
}()
h := md5.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
fmt.Printf("%x", h.Sum(nil))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG401b - Use of weak crypto SHA1
SampleCodeG401b = []CodeSample{
{[]string{`
package main
import (
"crypto/sha1"
"fmt"
"io"
"log"
"os"
)
func main() {
f, err := os.Open("file.txt")
if err != nil {
log.Fatal(err)
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
fmt.Printf("%x", h.Sum(nil))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG402 - TLS settings
SampleCodeG402 = []CodeSample{{[]string{`
// InsecureSkipVerify
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 1, gosec.NewConfig()}, {[]string{
`
// Insecure minimum version
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MinVersion: 0},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 1, gosec.NewConfig()}, {[]string{`
// Insecure max version
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{MaxVersion: 0},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}
`}, 1, gosec.NewConfig()}, {
[]string{`
// Insecure ciphersuite selection
package main
import (
"crypto/tls"
"fmt"
"net/http"
)
func main() {
tr := &http.Transport{
TLSClientConfig: &tls.Config{CipherSuites: []uint16{
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
},},
}
client := &http.Client{Transport: tr}
_, err := client.Get("https://golang.org/")
if err != nil {
fmt.Println(err)
}
}`}, 1, gosec.NewConfig()}}
// SampleCodeG403 - weak key strength
SampleCodeG403 = []CodeSample{
{[]string{`
package main
import (
"crypto/rand"
"crypto/rsa"
"fmt"
)
func main() {
//Generate Private Key
pvk, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
fmt.Println(err)
}
fmt.Println(pvk)
}`}, 1, gosec.NewConfig()}}
// SampleCodeG404 - weak random number
SampleCodeG404 = []CodeSample{
{[]string{`
package main
import "crypto/rand"
func main() {
good, _ := rand.Read(nil)
println(good)
}`}, 0, gosec.NewConfig()}, {[]string{`
package main
import "math/rand"
func main() {
bad := rand.Int()
println(bad)
}`}, 1, gosec.NewConfig()}, {[]string{`
package main
import (
"crypto/rand"
mrand "math/rand"
)
func main() {
good, _ := rand.Read(nil)
println(good)
i := mrand.Int31()
println(i)
}`}, 0, gosec.NewConfig()}}
// SampleCodeG501 - Blacklisted import MD5
SampleCodeG501 = []CodeSample{
{[]string{`
package main
import (
"crypto/md5"
"fmt"
"os"
)
func main() {
for _, arg := range os.Args {
fmt.Printf("%x - %s\n", md5.Sum([]byte(arg)), arg)
}
}`}, 1, gosec.NewConfig()}}
// SampleCodeG502 - Blacklisted import DES
SampleCodeG502 = []CodeSample{
{[]string{`
package main
import (
"crypto/cipher"
"crypto/des"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
)
func main() {
block, err := des.NewCipher([]byte("sekritz"))
if err != nil {
panic(err)
}
plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ")
ciphertext := make([]byte, des.BlockSize+len(plaintext))
iv := ciphertext[:des.BlockSize]
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic(err)
}
stream := cipher.NewCFBEncrypter(block, iv)
stream.XORKeyStream(ciphertext[des.BlockSize:], plaintext)
fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG503 - Blacklisted import RC4
SampleCodeG503 = []CodeSample{{[]string{`
package main
import (
"crypto/rc4"
"encoding/hex"
"fmt"
)
func main() {
cipher, err := rc4.NewCipher([]byte("sekritz"))
if err != nil {
panic(err)
}
plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ")
ciphertext := make([]byte, len(plaintext))
cipher.XORKeyStream(ciphertext, plaintext)
fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG504 - Blacklisted import CGI
SampleCodeG504 = []CodeSample{{[]string{`
package main
import (
"net/http/cgi"
"net/http"
)
func main() {
cgi.Serve(http.FileServer(http.Dir("/usr/share/doc")))
}`}, 1, gosec.NewConfig()}}
// SampleCodeG505 - Blacklisted import SHA1
SampleCodeG505 = []CodeSample{
{[]string{`
package main
import (
"crypto/sha1"
"fmt"
"os"
)
func main() {
for _, arg := range os.Args {
fmt.Printf("%x - %s\n", sha1.Sum([]byte(arg)), arg)
}
}`}, 1, gosec.NewConfig()}}
// SampleCode601 - Go build tags
SampleCode601 = []CodeSample{{[]string{`
// +build tag
package main
func main() {
fmt.Println("no package imported error")
}`}, 1, gosec.NewConfig()}}
// SampleCodeCgo - Cgo file sample
SampleCodeCgo = []CodeSample{{[]string{`
package main
import (
"fmt"
"unsafe"
)
/*
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int printData(unsigned char *data) {
return printf("cData: %lu \"%s\"\n", (long unsigned int)strlen(data), data);
}
*/
import "C"
func main() {
// Allocate C data buffer.
width, height := 8, 2
lenData := width * height
// add string terminating null byte
cData := (*C.uchar)(C.calloc(C.size_t(lenData+1), C.sizeof_uchar))
// When no longer in use, free C allocations.
defer C.free(unsafe.Pointer(cData))
// Go slice reference to C data buffer,
// minus string terminating null byte
gData := (*[1 << 30]byte)(unsafe.Pointer(cData))[:lenData:lenData]
// Write and read cData via gData.
for i := range gData {
gData[i] = '.'
}
copy(gData[0:], "Data")
gData[len(gData)-1] = 'X'
fmt.Printf("gData: %d %q\n", len(gData), gData)
C.printData(cData)
}
`}, 0, gosec.NewConfig()}}
)
|
[
"\"tainted_url\"",
"\"SOMETHING\"",
"\"tainted_file\"",
"\"tainted_file2\"",
"\"server_root\"",
"\"tainted_file3\""
] |
[] |
[
"tainted_url",
"tainted_file",
"tainted_file3",
"tainted_file2",
"SOMETHING",
"server_root"
] |
[]
|
["tainted_url", "tainted_file", "tainted_file3", "tainted_file2", "SOMETHING", "server_root"]
|
go
| 6 | 0 | |
metrics_test.go
|
package devstats
import (
"database/sql"
"fmt"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"testing"
"time"
lib "devstats"
testlib "devstats/test"
yaml "gopkg.in/yaml.v2"
)
// metricTestCase - used to test a single metric
// Setups are called to create database entries for the metric to return results
// metric - metrics/{{metric}}.sql file is used to run the metric, inside the file {{from}} and {{to}} are replaced with from, to
// from, to - used as the date range when calling the metric
// expected - we're expecting this result from the metric, it can either be a single row with a single column numeric value
// or multiple rows, each containing a metric name and its numeric value
type metricTestCase struct {
Setups []reflect.Value
Metric string `yaml:"metric"`
From time.Time `yaml:"from"` // used by non-histogram metrics
To time.Time `yaml:"to"` // used by non-histogram metrics
Period string `yaml:"period"` // used by histogram metrics
N int `yaml:"n"` // used by metrics that use moving periods
DebugDB bool `yaml:"debug"` // if set, the test will not drop the database at the end and will return after such a test, so you can run the metric manually via `runq` or directly on the DB
Replaces [][]string `yaml:"replaces"`
Expected [][]interface{} `yaml:"expected"`
SetupNames []string `yaml:"additional_setup_funcs"`
SetupArgs []string `yaml:"additional_setup_args"`
DataName string `yaml:"data"`
}
// Tests set for single project
type projectMetricTestCase struct {
ProjectName string `yaml:"project_name"`
Tests []metricTestCase `yaml:"tests"`
}
// Test YAML struct (for all projects)
type metricTests struct {
Projects []projectMetricTestCase `yaml:"projects"`
Data map[string]map[string][][]interface{} `yaml:"data"`
}
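// A hypothetical YAML sketch matching the two structures above (the yaml tags
// are real; the project, metric, dates and values are made-up placeholders):
//
//   projects:
//     - project_name: kubernetes
//       tests:
//         - metric: reviewers
//           from: 2017-07-01
//           to: 2017-08-01
//           data: reviewers_data
//           expected: [[1]]
//   data:
//     reviewers_data:
//       events:
//         - [1, PushEvent, 1, 0, true, 2017-07-14, A1, R, null]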
// Tests all metrics
func TestMetrics(t *testing.T) {
// Environment context parse
var ctx lib.Ctx
ctx.Init()
// Only allow tests to run on the "dbtest" database (never e.g. the production "gha" database)
if ctx.PgDB != "dbtest" {
t.Errorf("tests can only be run on \"dbtest\" database")
return
}
// We need to know project to test
if ctx.Project == "" {
t.Errorf("you need to set project via GHA2DB_PROJECT=project_name (one of projects from projects.yaml)")
}
// Load test cases
var tests metricTests
data, err := lib.ReadFile(&ctx, ctx.TestsYaml)
if err != nil {
lib.FatalOnError(err)
return
}
lib.FatalOnError(yaml.Unmarshal(data, &tests))
// Read per project test cases
testCases := []metricTestCase{}
for _, project := range tests.Projects {
if project.ProjectName == ctx.Project {
testCases = project.Tests
break
}
}
if len(testCases) < 1 {
t.Errorf("no tests defined for '%s' project", ctx.Project)
}
// Only selected metrics?
testMetrics := os.Getenv("TEST_METRICS")
selected := false
selectedMetrics := make(map[string]struct{})
if testMetrics != "" {
selected = true
ary := strings.Split(testMetrics, ",")
for _, m := range ary {
selectedMetrics[m] = struct{}{}
}
}
// Execute test cases
for index, test := range testCases {
if selected {
_, ok := selectedMetrics[test.Metric]
if !ok {
continue
}
}
prepareMetricTestCase(&test)
got, err := executeMetricTestCase(&test, &tests, &ctx)
if err != nil {
t.Errorf("test number %d (%s): %v", index+1, test.Metric, err.Error())
}
if !testlib.CompareSlices2D(test.Expected, got) {
t.Errorf("test number %d (%s), expected:\n%+v\n%+v\ngot test case: %+v", index+1, test.Metric, test.Expected, got, test)
}
if test.DebugDB {
t.Errorf("returning due to debugDB mode")
return
}
}
}
// This prepares raw YAML metric test to be executed:
// Binds additional setup function(s)
// if test uses "additional_setup_funcs", "additional_setup_args" section(s)
func prepareMetricTestCase(testMetric *metricTestCase) {
if len(testMetric.SetupNames) < 1 {
return
}
reflectTestMetric := reflect.ValueOf(*testMetric)
for _, setupName := range testMetric.SetupNames {
method := reflectTestMetric.MethodByName(setupName)
testMetric.Setups = append(testMetric.Setups, method)
}
}
// This prepares raw metric test to be executed:
// Generates data if test uses "data" section
func dataForMetricTestCase(con *sql.DB, ctx *lib.Ctx, testMetric *metricTestCase, tests *metricTests) (err error) {
if testMetric.DataName != "" {
data, ok := tests.Data[testMetric.DataName]
if !ok {
err = fmt.Errorf("No data key for \"%s\" in \"data\" section of \"%s\"", testMetric.DataName, ctx.TestsYaml)
return
}
events, ok := data["events"]
if ok {
// Add events
for _, event := range events {
err = addEvent(con, ctx, event...)
if err != nil {
return
}
}
}
repos, ok := data["repos"]
if ok {
// Add repos
for _, repo := range repos {
err = addRepo(con, ctx, repo...)
if err != nil {
return
}
}
}
iels, ok := data["issues_events_labels"]
if ok {
for _, iel := range iels {
err = addIssueEventLabel(con, ctx, iel...)
if err != nil {
return
}
}
}
texts, ok := data["texts"]
if ok {
textsAppend, okAppend := data["texts_append"]
for idx, text := range texts {
if okAppend {
text = append(text, textsAppend[idx%len(textsAppend)]...)
}
err = addText(con, ctx, text...)
if err != nil {
return
}
}
}
prs, ok := data["prs"]
if ok {
prsAppend, okAppend := data["prs_append"]
for idx, pr := range prs {
if okAppend {
pr = append(pr, prsAppend[idx%len(prsAppend)]...)
}
err = addPR(con, ctx, pr...)
if err != nil {
return
}
}
}
issuesLabels, ok := data["issues_labels"]
if ok {
for _, issueLabel := range issuesLabels {
err = addIssueLabel(con, ctx, issueLabel...)
if err != nil {
return
}
}
}
issues, ok := data["issues"]
if ok {
issuesAppend, okAppend := data["issues_append"]
for idx, issue := range issues {
if okAppend {
issue = append(issue, issuesAppend[idx%len(issuesAppend)]...)
}
err = addIssue(con, ctx, issue...)
if err != nil {
return
}
}
}
comments, ok := data["comments"]
if ok {
for _, comment := range comments {
err = addComment(con, ctx, comment...)
if err != nil {
return
}
}
}
commits, ok := data["commits"]
if ok {
for _, commit := range commits {
err = addCommit(con, ctx, commit...)
if err != nil {
return
}
}
}
affiliations, ok := data["affiliations"]
if ok {
for _, affiliation := range affiliations {
err = addActorAffiliation(con, ctx, affiliation...)
if err != nil {
return
}
}
}
actors, ok := data["actors"]
if ok {
for _, actor := range actors {
err = addActor(con, ctx, actor...)
if err != nil {
return
}
}
}
iprs, ok := data["issues_prs"]
if ok {
for _, ipr := range iprs {
err = addIssuePR(con, ctx, ipr...)
if err != nil {
return
}
}
}
payloads, ok := data["payloads"]
if ok {
for _, payload := range payloads {
err = addPayload(con, ctx, payload...)
if err != nil {
return
}
}
}
forkees, ok := data["forkees"]
if ok {
for _, forkee := range forkees {
err = addForkee(con, ctx, forkee...)
if err != nil {
return
}
}
}
ecfs, ok := data["events_commits_files"]
if ok {
for _, ecf := range ecfs {
err = addEventCommitFile(con, ctx, ecf...)
if err != nil {
return
}
}
}
milestones, ok := data["milestones"]
if ok {
for _, milestone := range milestones {
err = addMilestone(con, ctx, milestone...)
if err != nil {
return
}
}
}
}
return
}
// This executes a test of a single metric
// All metric data is defined in the "testMetric" argument
// A single metric test drops & creates the database from scratch (to avoid a junky database)
// It also creates the full DB structure - without indexes - they're not needed in
// small databases - like the ones created by test coverage tools
func executeMetricTestCase(testMetric *metricTestCase, tests *metricTests, ctx *lib.Ctx) (result [][]interface{}, err error) {
// Drop database if exists
lib.DropDatabaseIfExists(ctx)
// Create database if needed
createdDatabase := lib.CreateDatabaseIfNeeded(ctx)
if !createdDatabase {
err = fmt.Errorf("failed to create database \"%s\"", ctx.PgDB)
return
}
// Drop database after tests
if !testMetric.DebugDB {
defer func() { lib.DropDatabaseIfExists(ctx) }()
}
// Connect to Postgres DB
c := lib.PgConn(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
// Create DB structure
lib.Structure(ctx)
// Setup test data
err = dataForMetricTestCase(c, ctx, testMetric, tests)
if err != nil {
return
}
// Execute metrics additional setup(s) function
lenArgs := len(testMetric.SetupArgs)
for index, setup := range testMetric.Setups {
setupArgs := ""
if index < lenArgs {
setupArgs = testMetric.SetupArgs[index]
}
args := []reflect.Value{reflect.ValueOf(c), reflect.ValueOf(ctx), reflect.ValueOf(setupArgs)}
switch ret := setup.Call(args)[0].Interface().(type) {
case error:
err = ret
}
if err != nil {
return
}
}
// Execute metric and get its results
result, err = executeMetric(
c,
ctx,
testMetric.Metric,
testMetric.From,
testMetric.To,
testMetric.Period,
testMetric.N,
testMetric.Replaces,
)
return
}
// executeMetric runs metrics/{{metric}}.sql with {{from}} and {{to}} replaced by from/to in YMDHMS format
// The end result is a slice of slices of any type
func executeMetric(c *sql.DB, ctx *lib.Ctx, metric string, from, to time.Time, period string, n int, replaces [][]string) (result [][]interface{}, err error) {
// Metric file name
sqlFile := fmt.Sprintf("metrics/%s/%s.sql", ctx.Project, metric)
// Read and transform SQL file.
bytes, err := lib.ReadFile(ctx, sqlFile)
if err != nil {
return
}
sqlQuery := string(bytes)
if from.Year() >= 1980 {
sqlQuery = strings.Replace(sqlQuery, "{{from}}", lib.ToYMDHMSDate(from), -1)
}
if to.Year() >= 1980 {
sqlQuery = strings.Replace(sqlQuery, "{{to}}", lib.ToYMDHMSDate(to), -1)
}
sqlQuery = strings.Replace(sqlQuery, "{{period}}", period, -1)
sqlQuery = strings.Replace(sqlQuery, "{{n}}", strconv.Itoa(n)+".0", -1)
sqlQuery = strings.Replace(
sqlQuery,
"{{exclude_bots}}",
"not like all(array['googlebot', 'rktbot', 'coveralls', 'k8s-%', '%-bot', '%-robot', "+
"'bot-%', 'robot-%', '%[bot]%', '%-jenkins', '%-ci%bot', '%-testing', 'codecov-%'])",
-1,
)
for _, replace := range replaces {
if len(replace) != 2 {
err = fmt.Errorf("replace(s) should have length 2, invalid: %+v", replace)
return
}
sqlQuery = strings.Replace(sqlQuery, replace[0], replace[1], -1)
}
qrFrom := ""
qrTo := ""
if from.Year() >= 1980 {
qrFrom = lib.ToYMDHMSDate(from)
}
if to.Year() >= 1980 {
qrTo = lib.ToYMDHMSDate(to)
}
sqlQuery = lib.PrepareQuickRangeQuery(sqlQuery, period, qrFrom, qrTo)
// Execute SQL
rows := lib.QuerySQLWithErr(c, ctx, sqlQuery)
defer func() { lib.FatalOnError(rows.Close()) }()
// Now read the rows, whose number and column types are unknown
columns, err := rows.Columns()
if err != nil {
return
}
// Vals to hold any type as []interface{}
vals := make([]interface{}, len(columns))
for i := range columns {
vals[i] = new(sql.RawBytes)
}
// Get results into slices of slices of any type
var results [][]interface{}
for rows.Next() {
err = rows.Scan(vals...)
if err != nil {
return
}
// We need to iterate over the row and get the column types
rowSlice := []interface{}{}
for _, val := range vals {
var value interface{}
if val != nil {
value = string(*val.(*sql.RawBytes))
iValue, err := strconv.Atoi(value.(string))
if err == nil {
value = iValue
}
}
rowSlice = append(rowSlice, value)
}
results = append(results, rowSlice)
}
err = rows.Err()
if err != nil {
return
}
result = results
return
}
// Add event
// eid, etype, aid, rid, public, created_at, aname, rname, orgid
func addEvent(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 9 {
err = fmt.Errorf("addEvent: expects 9 variadic parameters")
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_events("+
"id, type, actor_id, repo_id, public, created_at, "+
"dup_actor_login, dup_repo_name, org_id) "+lib.NValues(9),
args...,
)
return
}
// Add repo
// id, name, org_id, org_login, repo_group
func addRepo(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 5 {
err = fmt.Errorf("addRepo: expects 5 variadic parameters")
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_repos(id, name, org_id, org_login, repo_group) "+lib.NValues(5),
args...,
)
return
}
// Add forkee
// forkee_id, event_id, name, full_name, owner_id, created_at, updated_at
// org, stargazers/watchers, forks, open_issues,
// actor_id, actor_login, repo_id, repo_name, type, owner_login
func addForkee(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 17 {
err = fmt.Errorf("addForkee: expects 17 variadic parameters")
return
}
newArgs := lib.AnyArray{
args[0], // forkee_id
args[1], // event_id
args[2], // name
args[3], // full_name
args[4], // owner_id
"description",
false, // fork
args[5], // created_at
args[6], // updated_at
time.Now(), // pushed_at
"www.homepage.com",
1, // size
"Golang", // language
args[7], // org
args[8], // stargazers
true, // has_issues
nil, // has_projects
true, // has_downloads
true, // has_wiki
nil, // has_pages
args[9], // forks
"master", // default_branch
args[10], // open_issues
args[8], // watchers
false, // private
args[11], // dup_actor_id
args[12], // dup_actor_login
args[13], // dup_repo_id
args[14], // dup_repo_name
args[15], // dup_type
args[5], // dup_created_at
args[16], // dup_owner_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_forkees("+
"id, event_id, name, full_name, owner_id, description, fork, "+
"created_at, updated_at, pushed_at, homepage, size, language, organization, "+
"stargazers_count, has_issues, has_projects, has_downloads, "+
"has_wiki, has_pages, forks, default_branch, open_issues, watchers, public, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_owner_login) "+lib.NValues(32),
newArgs...,
)
return
}
// Add actor
// id, login, name
func addActor(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 3 {
err = fmt.Errorf("addActor: expects 3 variadic parameters")
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_actors(id, login, name) "+lib.NValues(3),
args...,
)
return
}
// Add actor affiliation
// actor_id, company_name, dt_from, dt_to
func addActorAffiliation(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 4 {
err = fmt.Errorf("addActorAffiliation: expects 4 variadic parameters")
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_actors_affiliations(actor_id, company_name, dt_from, dt_to) "+lib.NValues(4),
args...,
)
return
}
// Add issue event label
// iid, eid, lid, lname, created_at,
// repo_id, repo_name, actor_id, actor_login, type, issue_number
func addIssueEventLabel(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 11 {
err = fmt.Errorf("addIssueEventLabel: expects 11 variadic parameters, got %v", len(args))
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_events_labels("+
"issue_id, event_id, label_id, label_name, created_at, "+
"repo_id, repo_name, actor_id, actor_login, type, issue_number"+
") "+lib.NValues(11),
args...,
)
return
}
// Add events commits files
// sha, eid, path, size, dt, repo_group,
// dup_repo_id, dup_repo_name, dup_type, dup_created_at
func addEventCommitFile(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 10 {
err = fmt.Errorf("addEventCommitFile: expects 10 variadic parameters, got %v", len(args))
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_events_commits_files("+
"sha, event_id, path, size, dt, repo_group, "+
"dup_repo_id, dup_repo_name, dup_type, dup_created_at"+
") "+lib.NValues(10),
args...,
)
return
}
// Add issue label
// iid, eid, lid, actor_id, actor_login, repo_id, repo_name,
// ev_type, ev_created_at, issue_number, label_name
func addIssueLabel(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 11 {
err = fmt.Errorf("addIssueLabel: expects 11 variadic parameters, got %v", len(args))
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_labels(issue_id, event_id, label_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_issue_number, dup_label_name"+
") "+lib.NValues(11),
args...,
)
return
}
// Add text
// eid, body, created_at
// repo_id, repo_name, actor_id, actor_login, type
func addText(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 8 {
err = fmt.Errorf("addText: expects 8 variadic parameters")
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_texts("+
"event_id, body, created_at, "+
"repo_id, repo_name, actor_id, actor_login, type"+
") "+lib.NValues(8),
args...,
)
return
}
// Add commit
// sha, event_id, author_name, message, dup_actor_id, dup_actor_login,
// dup_repo_id, dup_repo_name, dup_type, dup_created_at
func addCommit(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 10 {
err = fmt.Errorf("addCommit: expects 10 variadic parameters")
return
}
// New args
newArgs := lib.AnyArray{
args[0], // sha
args[1], // event_id
args[2], // author_name
args[3], // message
true, // is_distinct
args[4], // dup_actor_id
args[5], // dup_actor_login
args[6], // dup_repo_id
args[7], // dup_repo_name
args[8], // dup_type
args[9], // dup_created_at
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_commits("+
"sha, event_id, author_name, message, is_distinct, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at"+
") "+lib.NValues(11),
newArgs...,
)
return
}
// Add comment
// id, event_id, body, created_at, user_id, repo_id, repo_name, actor_id, actor_login, type
func addComment(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 10 {
err = fmt.Errorf("addComment: expects 10 variadic parameters")
return
}
// New args
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // body
args[3], // created_at
time.Now(), // updated_at
args[4], // user_id
nil, // commit_id
nil, // original_commit_id
nil, // diff_hunk
nil, // position
nil, // original_position
nil, // path
nil, // pull_request_review_id
nil, // line
args[7], // actor_id
args[8], // actor_login
args[5], // repo_id
args[6], // repo_name
args[9], // type
args[3], // dup_created_at
args[6], // dup_user_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_comments("+
"id, event_id, body, created_at, updated_at, user_id, "+
"commit_id, original_commit_id, diff_hunk, position, "+
"original_position, path, pull_request_review_id, line, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login) "+lib.NValues(21),
newArgs...,
)
return
}
// Add payload
// event_id, issue_id, pull_request_id, comment_id, number, forkee_id, release_id, member_id
// actor_id, actor_login, repo_id, repo_name, event_type, event_created_at
func addPayload(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 14 {
err = fmt.Errorf("addPayload: expects 14 variadic parameters")
return
}
newArgs := lib.AnyArray{
args[0], // event_id
nil, // push_id, size, ref, head, befor
nil,
nil,
nil,
nil,
"created", // action
args[1], // issue_id
args[2], // pull_request_id
args[3], // comment_id
nil, // ref_type, master_branch, commit
nil,
nil,
"desc", // description
args[4], // number
args[5], // forkee_id
args[6], // release_id
args[7], // member_id
args[8], // actor.ID
args[9], // actor.Login
args[10], // repo.ID
args[11], // repo.Name
args[12], // event.Type
args[13], // event.CreatedAt
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_payloads("+
"event_id, push_id, size, ref, head, befor, action, "+
"issue_id, pull_request_id, comment_id, ref_type, master_branch, commit, "+
"description, number, forkee_id, release_id, member_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at"+
") "+lib.NValues(24),
newArgs...,
)
return
}
// Add PR
// prid, eid, uid, merged_id, assignee_id, num, state, title, body, created_at, closed_at, merged_at, merged
// repo_id, repo_name, actor_id, actor_login, updated_at
func addPR(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 18 {
err = fmt.Errorf("addPR: expects 18 variadic parameters, got %v", len(args))
return
}
newArgs := lib.AnyArray{
args[0], // PR.id
args[1], // event.ID
args[2], // user.ID
"250aac33d5aae922aac08bba4f06bd139c1c8994", // base SHA
"9c31bcbc683a491c3d4122adcfe4caaab6e2d0fc", // head SHA
args[3], // MergedBy.ID
args[4], // Assignee.ID
nil,
args[5], // PR.Number
args[6], // PR.State (open, closed)
false, // PR.Locked
args[7], // PR.Title
args[8], // PR.Body
args[9], // PR.CreatedAt
args[17], // PR.UpdatedAt
args[10], // PR.ClosedAt
args[11], // PR.MergedAt
"9c31bcbc683a491c3d4122adcfe4caaab6e2d0fc", // PR.MergeCommitSHA
args[12], // PR.Merged
true, // PR.Mergeable
true, // PR.Rebaseable
"clean", // PR.MergeableState (nil, unknown, clean, unstable, dirty)
1, // PR.Comments
1, // PR.ReviewComments
true, // PR.MaintainerCanModify
1, // PR.Commits
1, // PR.additions
1, // PR.Deletions
1, // PR.ChangedFiles
args[15], // Duplicate data starts here: ev.Actor.ID
args[16], // ev.Actor.Login
args[13], // ev.Repo.ID
args[14], // ev.Repo.Name
"T", // ev.Type
time.Now(), // ev.CreatedAt
"", // PR.User.Login
nil, // PR.Assignee.Login
nil, // PR.MergedBy.Login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_pull_requests("+
"id, event_id, user_id, base_sha, head_sha, merged_by_id, assignee_id, milestone_id, "+
"number, state, locked, title, body, created_at, updated_at, closed_at, merged_at, "+
"merge_commit_sha, merged, mergeable, rebaseable, mergeable_state, comments, "+
"review_comments, maintainer_can_modify, commits, additions, deletions, changed_files, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login, dupn_assignee_login, dupn_merged_by_login) "+lib.NValues(38),
newArgs...,
)
return
}
// Add Issue PR
// issue_id, pr_id, number, repo_id, repo_name, created_at
func addIssuePR(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 6 {
err = fmt.Errorf("addIssuePR: expects 6 variadic parameters, got %v", len(args))
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_pull_requests("+
"issue_id, pull_request_id, number, repo_id, repo_name, created_at"+
") "+lib.NValues(6),
args...,
)
return
}
// Add Issue
// id, event_id, assignee_id, body, closed_at, created_at, number, state, title, updated_at
// user_id, dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type,
// is_pull_request, milestone_id, dup_created_at
func addIssue(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 19 {
err = fmt.Errorf("addIssue: expects 19 variadic parameters, got %v", len(args))
return
}
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // assignee_id
args[3], // body
args[4], // closed_at
0, // comments
args[5], // created_at
false, // locked
args[17], // milestone_id
args[6], // number
args[7], // state
args[8], // title
args[9], // updated_at
args[10], // user_id
args[11], // dup_actor_id
args[12], // dup_actor_login
args[13], // dup_repo_id
args[14], // dup_repo_name
args[15], // dup_type
args[18], // dup_created_at
"", // dup_user_login
"", // dup_assignee_login
args[16], // is_pull_request
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues("+
"id, event_id, assignee_id, body, closed_at, comments, created_at, "+
"locked, milestone_id, number, state, title, updated_at, user_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login, dupn_assignee_login, is_pull_request) "+lib.NValues(23),
newArgs...,
)
return
}
// Add Milestone
// id, event_id, closed_at, created_at, actor_id, due_on, number, state, title, updated_at
// dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at
func addMilestone(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 16 {
err = fmt.Errorf("addMilestone: expects 16 variadic parameters, got %v", len(args))
return
}
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // closed_at
0, // closed issues
args[3], // created_at
args[4], // actor_id
"", // description
args[5], // due_on
args[6], // number
0, // open issues
args[7], // state
args[8], // title
args[9], // updated_at
args[10], // dup_actor_id
args[11], // dup_actor_login
args[12], // dup_repo_id
args[13], // dup_repo_name
args[14], // dup_type
args[15], // dup_created_at
"", // dup_creator_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_milestones("+
"id, event_id, closed_at, closed_issues, created_at, creator_id, "+
"description, due_on, number, open_issues, state, title, updated_at, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dupn_creator_login) "+lib.NValues(20),
newArgs...,
)
return
}
// Helper function - save data structure to YAML
// Used when migrating test coverage from go source to yaml file
func interfaceToYaml(fn string, i *[][]interface{}) (err error) {
yml, err := yaml.Marshal(i)
lib.FatalOnError(err)
lib.FatalOnError(ioutil.WriteFile(fn, yml, 0644))
return
}
// Set dynamic dates after loaded static YAML data
func (metricTestCase) SetDates(con *sql.DB, ctx *lib.Ctx, arg string) (err error) {
//err = fmt.Errorf("got '%s'", arg)
//return
updates := strings.Split(arg, ",")
for _, update := range updates {
ary := strings.Split(update, ";")
dt := "1980-01-01"
if len(ary) > 3 {
dt = ary[3]
}
query := fmt.Sprintf(
"update %s set %s = %s where date(%s) = '%s'",
ary[0],
ary[1],
ary[2],
ary[1],
dt,
)
_, err = lib.ExecSQL(
con,
ctx,
query,
)
}
return
}
// Sets Repo alias to be the same as Name on all repos
func (metricTestCase) UpdateRepoAliasFromName(con *sql.DB, ctx *lib.Ctx, arg string) (err error) {
_, err = lib.ExecSQL(con, ctx, "update gha_repos set alias = name")
lib.FatalOnError(err)
return
}
// Create dynamic data for affiliations metric after loaded static YAML data
func (metricTestCase) AffiliationsTestHelper(con *sql.DB, ctx *lib.Ctx, arg string) (err error) {
ft := testlib.YMDHMS
// Activities counted
etypes := []string{
"PullRequestReviewCommentEvent",
"PushEvent",
"PullRequestEvent",
"IssuesEvent",
"IssueCommentEvent",
"CommitCommentEvent",
}
// Date ranges (two dates are outside metric area)
dates := []time.Time{}
dt := ft(2017, 8, 31)
dtTo := ft(2017, 10, 2)
for dt.Before(dtTo) || dt.Equal(dtTo) {
dates = append(dates, dt)
dt = lib.NextDayStart(dt)
}
// Will hold all events generated
events := [][]interface{}{}
eid := 1
for _, aid := range []string{"1", "2", "3"} {
for _, etype := range etypes {
for _, dt := range dates {
// Events to add
// eid, etype, aid, rid, public, created_at, aname, rname, orgid
events = append(events, []interface{}{eid, etype, aid, 0, true, dt, "A" + aid, "R", nil})
eid++
}
}
}
// Add events
for _, event := range events {
err = addEvent(con, ctx, event...)
if err != nil {
return
}
}
return
}
|
[
"\"TEST_METRICS\""
] |
[] |
[
"TEST_METRICS"
] |
[]
|
["TEST_METRICS"]
|
go
| 1 | 0 | |
sdk/textanalytics/azure-ai-textanalytics/samples/sample_model_version.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_model_version.py
DESCRIPTION:
This sample demonstrates how to set the model_version for pre-built Text Analytics models.
Recognize entities is used in this sample, but the concept applies generally to all pre-built Text Analytics models.
By default, model_version is set to "latest". This indicates that the latest generally available version
of the model will be used. Model versions are date-based, e.g. "2021-06-01".
See the documentation for a list of all model versions:
https://docs.microsoft.com/azure/cognitive-services/language-service/named-entity-recognition/how-to-call#specify-the-ner-model
USAGE:
python sample_model_version.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
def sample_model_version():
print("--------------Choosing model_version sample--------------")
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, RecognizeEntitiesAction
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
text_analytics_client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
documents = [
"I work for Foo Company, and we hired Contoso for our annual founding ceremony. The food \
was amazing and we all can't say enough good words about the quality and the level of service."
]
print("\nSetting model_version='latest' with recognize_entities")
result = text_analytics_client.recognize_entities(documents, model_version="latest")
result = [review for review in result if not review.is_error]
print("...Results of Recognize Entities:")
for review in result:
for entity in review.entities:
print("......Entity '{}' has category '{}'".format(entity.text, entity.category))
print("\nSetting model_version='latest' with recognize entities action in begin_analyze_actions")
poller = text_analytics_client.begin_analyze_actions(
documents,
actions=[
RecognizeEntitiesAction(model_version="latest")
]
)
print("...Results of Recognize Entities Action:")
document_results = poller.result()
for action_results in document_results:
recognize_entities_result = action_results[0]
if recognize_entities_result.is_error:
print("......Is an error with code '{}' and message '{}'".format(
recognize_entities_result.code, recognize_entities_result.message
))
else:
for entity in recognize_entities_result.entities:
print("......Entity '{}' has category '{}'".format(entity.text, entity.category))
if __name__ == '__main__':
sample_model_version()
|
[] |
[] |
[
"AZURE_TEXT_ANALYTICS_ENDPOINT",
"AZURE_TEXT_ANALYTICS_KEY"
] |
[]
|
["AZURE_TEXT_ANALYTICS_ENDPOINT", "AZURE_TEXT_ANALYTICS_KEY"]
|
python
| 2 | 0 | |
plasma_link_django/wsgi.py
|
"""
WSGI config for plasma_link_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plasma_link_django.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
userbot/__init__.py
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
LOGS.info("You MUST have a python version of at least 3.8. "
"Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", 863789")
API_HASH = os.environ.get("API_HASH", 8dd97d477b9ff5ccc5caf3e46fb70032")
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "True"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APPNAME = os.environ.get("HEROKU_APPNAME", None)
HEROKU_APIKEY = os.environ.get("HEROKU_APIKEY", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/mkaraniya/OpenUserBot.git")
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", "@Iamshubham")
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", "INDIA"))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/yshalsager/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except Exception:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
|
[] |
[] |
[
"",
"GOOGLE_CHROME_BIN",
"G_DRIVE_CLIENT_SECRET",
"LYDIA_API_KEY",
"COUNTRY",
"LASTFM_API",
"ANTI_SPAMBOT_SHOUT",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"HEROKU_APPNAME",
"BIO_PREFIX",
"LOGSPAMMER",
"TZ_NUMBER",
"LASTFM_PASSWORD",
"DATABASE_URL",
"GDRIVE_FOLDER_ID",
"HEROKU_APIKEY",
"CHROME_DRIVER",
"YOUTUBE_API_KEY",
"HEROKU_MEMEZ",
"LASTFM_USERNAME",
"G_DRIVE_CLIENT_ID",
"API_KEY",
"PM_AUTO_BAN",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"LASTFM_SECRET",
"G_DRIVE_AUTH_TOKEN_DATA",
"WEATHER_DEFCITY",
"STRING_SESSION",
"CONSOLE_LOGGER_VERBOSE",
"ALIVE_NAME",
"BOTLOG_CHATID",
"TMP_DOWNLOAD_DIRECTORY",
"CLEAN_WELCOME",
"REM_BG_API_KEY",
"BOTLOG",
"API_HASH"
] |
[]
|
["", "GOOGLE_CHROME_BIN", "G_DRIVE_CLIENT_SECRET", "LYDIA_API_KEY", "COUNTRY", "LASTFM_API", "ANTI_SPAMBOT_SHOUT", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "HEROKU_APPNAME", "BIO_PREFIX", "LOGSPAMMER", "TZ_NUMBER", "LASTFM_PASSWORD", "DATABASE_URL", "GDRIVE_FOLDER_ID", "HEROKU_APIKEY", "CHROME_DRIVER", "YOUTUBE_API_KEY", "HEROKU_MEMEZ", "LASTFM_USERNAME", "G_DRIVE_CLIENT_ID", "API_KEY", "PM_AUTO_BAN", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "LASTFM_SECRET", "G_DRIVE_AUTH_TOKEN_DATA", "WEATHER_DEFCITY", "STRING_SESSION", "CONSOLE_LOGGER_VERBOSE", "ALIVE_NAME", "BOTLOG_CHATID", "TMP_DOWNLOAD_DIRECTORY", "CLEAN_WELCOME", "REM_BG_API_KEY", "BOTLOG", "API_HASH"]
|
python
| 39 | 0 | |
internal/storage/storage.go
|
// Package storage describes the interface for interacting with our datastore, and provides a PostgreSQL
// implementation for the slice-it-api.
package storage
import (
"database/sql"
"fmt"
"os"
// postgres driver
_ "github.com/lib/pq"
)
// URLRepository describes the interface for interacting with our datastore. This can viewed
// like a plug in adapter, making testing and/or switching datastores much more trivial.
type URLRepository interface {
Create(url SliceItURL) error
Get(urlHash string) (string, error)
GetViewCount(urlHash string) (int, error)
}
// SliceItURL represents a URL in our system.
type SliceItURL struct {
ID int `json:"id,omitempty"`
Short string `json:"short,omitempty"`
Long string `json:"long,omitempty"`
ViewCount int `json:"view_count,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
}
// Db provides a set of methods for interacting with our database.
type Db struct {
*sql.DB
}
// NewDB creates a connection with our postgres database and returns it, otherwise an error.
func NewDB() (*Db, error) {
connStr := fmt.Sprintf(
"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
os.Getenv("SLICE_IT_API_DB_HOST"),
os.Getenv("SLICE_IT_API_DB_PORT"),
os.Getenv("SLICE_IT_API_DB_USER"),
os.Getenv("SLICE_IT_API_DB_PASSWORD"),
os.Getenv("SLICE_IT_API_DB_NAME"),
os.Getenv("SLICE_IT_API_SSL_MODE"),
)
db, err := sql.Open("postgres", connStr)
if err != nil {
return nil, err
}
if err := db.Ping(); err != nil {
return nil, err
}
return &Db{db}, nil
}
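// A minimal usage sketch, assuming the SLICE_IT_API_* environment variables
// read above are already exported (error handling elided for brevity):
//
//   db, err := storage.NewDB()
//   if err != nil { /* handle connection error */ }
//   err = db.Create(storage.SliceItURL{Short: "abc123", Long: "https://example.com"})
//   long, err := db.Get("abc123")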
// Create handles inserting a SliceItURL into the database. With no other requirements, we
// don't need to return anything but an error if it happens.
func (db *Db) Create(url SliceItURL) error {
tx, err := db.Begin()
if err != nil {
return err
}
// Rollback is safe to call even if the tx is already closed,
// so if the tx commits successfully, this is a no-op
defer tx.Rollback()
// No need to persist any more than one of each URL. The hash function is deterministic
query := "INSERT INTO urls (short, long) VALUES ($1, $2) ON CONFLICT ON CONSTRAINT unique_url_constraint DO NOTHING;"
if _, err = tx.Exec(query, url.Short, url.Long); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
// Get takes a URL hash and finds and returns the full length original link
func (db *Db) Get(urlHash string) (string, error) {
query := "SELECT long FROM urls WHERE short = $1;"
var url SliceItURL
row := db.QueryRow(query, urlHash)
if err := row.Scan(&url.Long); err != nil {
return "", err
}
// Thought here: would we actually want this decoupled and have the caller use it after fetching a URL?
// We may not want every "Get" call here to do this. This is okay for now, explain thoughts to Jim.
if err := db.incrementViewCount(urlHash); err != nil {
return "", err
}
return url.Long, nil
}
// GetViewCount takes a short URL hash and returns the view count for that URL
func (db *Db) GetViewCount(urlHash string) (int, error) {
query := "SELECT view_count FROM urls WHERE short = $1;"
var url SliceItURL
row := db.QueryRow(query, urlHash)
if err := row.Scan(&url.ViewCount); err != nil {
return 0, err
}
return url.ViewCount, nil
}
func (db *Db) incrementViewCount(urlHash string) error {
tx, err := db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
query := "UPDATE urls SET view_count = view_count + 1 WHERE short = $1;"
if _, err = tx.Exec(query, urlHash); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}
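// ExampleUsage is a hypothetical sketch (not part of the original file) showing
// how a caller might exercise the repository; the hash and URL values below are
// illustrative assumptions.
func ExampleUsage() error {
	db, err := NewDB()
	if err != nil {
		return err
	}
	if err := db.Create(SliceItURL{Short: "abc123", Long: "https://example.com/some/long/path"}); err != nil {
		return err
	}
	// Get resolves the short hash and also increments the view count.
	long, err := db.Get("abc123")
	if err != nil {
		return err
	}
	count, err := db.GetViewCount("abc123")
	if err != nil {
		return err
	}
	fmt.Printf("resolved %s (%d views)\n", long, count)
	return nil
}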
|
[
"\"SLICE_IT_API_DB_HOST\"",
"\"SLICE_IT_API_DB_PORT\"",
"\"SLICE_IT_API_DB_USER\"",
"\"SLICE_IT_API_DB_PASSWORD\"",
"\"SLICE_IT_API_DB_NAME\"",
"\"SLICE_IT_API_SSL_MODE\""
] |
[] |
[
"SLICE_IT_API_DB_PASSWORD",
"SLICE_IT_API_DB_USER",
"SLICE_IT_API_DB_PORT",
"SLICE_IT_API_SSL_MODE",
"SLICE_IT_API_DB_NAME",
"SLICE_IT_API_DB_HOST"
] |
[]
|
["SLICE_IT_API_DB_PASSWORD", "SLICE_IT_API_DB_USER", "SLICE_IT_API_DB_PORT", "SLICE_IT_API_SSL_MODE", "SLICE_IT_API_DB_NAME", "SLICE_IT_API_DB_HOST"]
|
go
| 6 | 0 | |
docs/conf.py
|
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../tests'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django
django.setup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-import-export'
copyright = '2012–2020, Bojan Mihelac'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
from import_export import __version__
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
except ImportError:
version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-import-export'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-import-export.tex', 'django-import-export Documentation',
'Bojan Mihelac', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-import-export', 'django-import-export Documentation',
['Bojan Mihelac'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-import-export', 'django-import-export Documentation', 'Bojan Mihelac',
'django-import-export', 'Import/export data for Django', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
texinfo_appendices = []
# intersphinx documentation
intersphinx_mapping = {
'tablib': ('https://tablib.readthedocs.io/en/stable/', None)
}
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
python/ray/tests/test_client_proxy.py
|
from glob import glob
import json
import os
import pytest
import random
import sys
import time
from unittest.mock import patch
import grpc
import ray
import ray.core.generated.ray_client_pb2 as ray_client_pb2
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.util.client.server.proxier as proxier
from ray._private.test_utils import run_string_as_driver
def start_ray_and_proxy_manager(n_ports=2):
agent_port = random.choice(range(50000, 55000))
ray_instance = ray.init(_redis_password="test")
agent_port = ray.worker.global_worker.node.metrics_agent_port
pm = proxier.ProxyManager(
ray_instance["redis_address"],
session_dir=ray_instance["session_dir"],
redis_password="test",
runtime_env_agent_port=agent_port)
free_ports = random.choices(range(45000, 45100), k=n_ports)
pm._free_ports = free_ports.copy()
return pm, free_ports
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
def test_proxy_manager_lifecycle(shutdown_only):
"""
Creates a ProxyManager and tests basic handling of the lifetime of a
specific RayClient Server. It checks the following properties:
1. The SpecificServer is created using the first port.
2. The SpecificServer comes alive and has a log associated with it.
3. The SpecificServer destructs itself when no client connects.
4. The ProxyManager returns the port of the destructed SpecificServer.
"""
proxier.CHECK_PROCESS_INTERVAL_S = 1
os.environ["TIMEOUT_FOR_SPECIFIC_SERVER_S"] = "5"
pm, free_ports = start_ray_and_proxy_manager(n_ports=2)
client = "client1"
pm.create_specific_server(client)
assert pm.start_specific_server(client, JobConfig())
# Channel should be ready and corresponding to an existing server
grpc.channel_ready_future(pm.get_channel(client)).result(timeout=5)
proc = pm._get_server_for_client(client)
assert proc.port == free_ports[0], f"Free Ports are: {free_ports}"
log_files_path = os.path.join(pm.node.get_session_dir_path(), "logs",
"ray_client_server*")
files = glob(log_files_path)
assert any(str(free_ports[0]) in f for f in files)
proc.process_handle_future.result().process.wait(10)
# Wait for reconcile loop
time.sleep(2)
assert len(pm._free_ports) == 2
assert pm._get_unused_port() == free_ports[1]
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
def test_proxy_manager_bad_startup(shutdown_only):
"""
Test that when a SpecificServer fails to start (because of a bad JobConfig)
that it is properly GC'd.
"""
proxier.CHECK_PROCESS_INTERVAL_S = 1
proxier.CHECK_CHANNEL_TIMEOUT_S = 1
pm, free_ports = start_ray_and_proxy_manager(n_ports=2)
client = "client1"
pm.create_specific_server(client)
assert not pm.start_specific_server(
client,
JobConfig(
runtime_env={"conda": "conda-env-that-sadly-does-not-exist"}))
# Wait for reconcile loop
time.sleep(2)
assert pm.get_channel(client) is None
assert len(pm._free_ports) == 2
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25001 --port 0"],
indirect=True)
def test_multiple_clients_use_different_drivers(call_ray_start):
"""
    Test that each client uses separate JobIDs and namespaces.
"""
with ray.client("localhost:25001").connect():
job_id_one = ray.get_runtime_context().job_id
namespace_one = ray.get_runtime_context().namespace
with ray.client("localhost:25001").connect():
job_id_two = ray.get_runtime_context().job_id
namespace_two = ray.get_runtime_context().namespace
assert job_id_one != job_id_two
assert namespace_one != namespace_two
check_we_are_second = """
import ray
info = ray.client('localhost:25005').connect()
assert info._num_clients == {num_clients}
"""
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --ray-client-server-port 25005 "
"--port 0 --redis-password=password"
],
indirect=True)
def test_correct_num_clients(call_ray_start):
"""
Checks that the returned value of `num_clients` correctly tracks clients
connecting and disconnecting.
"""
info = ray.client("localhost:25005").connect()
assert info._num_clients == 1
run_string_as_driver(check_we_are_second.format(num_clients=2))
ray.util.disconnect()
run_string_as_driver(check_we_are_second.format(num_clients=1))
check_connection = """
import ray
ray.client("localhost:25010").connect()
assert ray.util.client.ray.worker.log_client.log_thread.is_alive()
"""
@pytest.mark.skipif(
sys.platform != "linux",
reason="PSUtil does not work the same on windows & MacOS if flaky.")
def test_delay_in_rewriting_environment(shutdown_only):
"""
Check that a delay in `ray_client_server_env_prep` does not break
a Client connecting.
"""
proxier.LOGSTREAM_RETRIES = 3
proxier.LOGSTREAM_RETRY_INTERVAL_SEC = 1
ray_instance = ray.init()
server = proxier.serve_proxier(
"localhost:25010",
ray_instance["redis_address"],
session_dir=ray_instance["session_dir"])
def delay_in_rewrite(_input: JobConfig):
time.sleep(6)
return _input
with patch.object(proxier, "ray_client_server_env_prep", delay_in_rewrite):
run_string_as_driver(check_connection)
server.stop(0)
get_error = """
import ray
error = None
try:
ray.client("localhost:25030").connect()
except Exception as e:
error = e
assert error is not None, "Connect did not fail!"
assert "Initialization failure from server" in str(error), "Bad error msg"
assert "WEIRD_ERROR" in str(error), "Bad error msg"
"""
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
def test_startup_error_yields_clean_result(shutdown_only):
"""
Check that an error while preparing the environment yields an actionable,
clear error on the *client side*.
"""
ray_instance = ray.init()
server = proxier.serve_proxier(
"localhost:25030",
ray_instance["redis_address"],
session_dir=ray_instance["session_dir"])
def raise_not_rewrite(input: JobConfig):
raise RuntimeError("WEIRD_ERROR")
with patch.object(proxier, "ray_client_server_env_prep",
raise_not_rewrite):
run_string_as_driver(get_error)
server.stop(0)
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --ray-client-server-port 25031 "
"--port 0 --redis-password=password"
],
indirect=True)
def test_runtime_install_error_message(call_ray_start):
"""
Check that an error while preparing the runtime environment for the client
server yields an actionable, clear error on the *client side*.
"""
with pytest.raises(ConnectionAbortedError) as excinfo:
ray.client("localhost:25031").env({
"pip": ["ray-this-doesnt-exist"]
}).connect()
assert ("No matching distribution found for ray-this-doesnt-exist" in str(
excinfo.value)), str(excinfo.value)
ray.util.disconnect()
def test_prepare_runtime_init_req_fails():
"""
Check that a connection that is initiated with a non-Init request
raises an error.
"""
put_req = ray_client_pb2.DataRequest(put=ray_client_pb2.PutRequest())
with pytest.raises(AssertionError):
proxier.prepare_runtime_init_req(put_req)
def test_prepare_runtime_init_req_no_modification():
"""
Check that `prepare_runtime_init_req` properly extracts the JobConfig.
"""
job_config = JobConfig(worker_env={"KEY": "VALUE"}, ray_namespace="abc")
init_req = ray_client_pb2.DataRequest(
init=ray_client_pb2.InitRequest(
job_config=pickle.dumps(job_config),
ray_init_kwargs=json.dumps({
"log_to_driver": False
})), )
req, new_config = proxier.prepare_runtime_init_req(init_req)
assert new_config.serialize() == job_config.serialize()
assert isinstance(req, ray_client_pb2.DataRequest)
assert pickle.loads(
req.init.job_config).serialize() == new_config.serialize()
assert json.loads(req.init.ray_init_kwargs) == {"log_to_driver": False}
def test_prepare_runtime_init_req_modified_job():
"""
Check that `prepare_runtime_init_req` properly extracts the JobConfig and
modifies it according to `ray_client_server_env_prep`.
"""
job_config = JobConfig(worker_env={"KEY": "VALUE"}, ray_namespace="abc")
init_req = ray_client_pb2.DataRequest(
init=ray_client_pb2.InitRequest(
job_config=pickle.dumps(job_config),
ray_init_kwargs=json.dumps({
"log_to_driver": False
})))
def modify_namespace(job_config: JobConfig):
job_config.set_ray_namespace("test_value")
return job_config
with patch.object(proxier, "ray_client_server_env_prep", modify_namespace):
req, new_config = proxier.prepare_runtime_init_req(init_req)
assert new_config.ray_namespace == "test_value"
assert pickle.loads(
req.init.job_config).serialize() == new_config.serialize()
assert json.loads(req.init.ray_init_kwargs) == {"log_to_driver": False}
@pytest.mark.parametrize(
"test_case",
[ # no
(["ipython", "-m", "ray.util.client.server"], True),
(["ipython -m ray.util.client.server"], True),
(["ipython -m", "ray.util.client.server"], True),
(["bash", "ipython", "-m", "ray.util.client.server"], False),
(["bash", "ipython -m ray.util.client.server"], False),
(["python", "-m", "bash", "ipython -m ray.util.client.server"], False)
])
def test_match_running_client_server(test_case):
command, result = test_case
assert proxier._match_running_client_server(command) == result
@pytest.mark.parametrize("with_specific_server", [True, False])
@pytest.mark.skipif(
sys.platform == "win32",
reason="PSUtil does not work the same on windows.")
def test_proxy_manager_internal_kv(shutdown_only, with_specific_server):
"""
Test that proxy manager can use internal kv with and without a
SpecificServer and that once a SpecificServer is started up, it
goes through it.
"""
proxier.CHECK_PROCESS_INTERVAL_S = 1
# The timeout has likely been set to 1 in an earlier test. Increase timeout
# to wait for the channel to become ready.
proxier.CHECK_CHANNEL_TIMEOUT_S = 5
os.environ["TIMEOUT_FOR_SPECIFIC_SERVER_S"] = "5"
pm, free_ports = start_ray_and_proxy_manager(n_ports=2)
client = "client1"
task_servicer = proxier.RayletServicerProxy(None, pm)
def make_internal_kv_calls():
response = task_servicer.KVPut(
ray_client_pb2.KVPutRequest(key=b"key", value=b"val"))
assert isinstance(response, ray_client_pb2.KVPutResponse)
assert not response.already_exists
response = task_servicer.KVPut(
ray_client_pb2.KVPutRequest(key=b"key", value=b"val2"))
assert isinstance(response, ray_client_pb2.KVPutResponse)
assert response.already_exists
response = task_servicer.KVGet(ray_client_pb2.KVGetRequest(key=b"key"))
assert isinstance(response, ray_client_pb2.KVGetResponse)
assert response.value == b"val"
response = task_servicer.KVPut(
ray_client_pb2.KVPutRequest(
key=b"key", value=b"val2", overwrite=True))
assert isinstance(response, ray_client_pb2.KVPutResponse)
assert response.already_exists
response = task_servicer.KVGet(ray_client_pb2.KVGetRequest(key=b"key"))
assert isinstance(response, ray_client_pb2.KVGetResponse)
assert response.value == b"val2"
with patch("ray.util.client.server.proxier._get_client_id_from_context"
) as mock_get_client_id:
mock_get_client_id.return_value = client
if with_specific_server:
pm.create_specific_server(client)
assert pm.start_specific_server(client, JobConfig())
channel = pm.get_channel(client)
assert channel is not None
task_servicer.Init(
ray_client_pb2.InitRequest(
job_config=pickle.dumps(JobConfig())))
# Mock out the internal kv calls in this process to raise an
# exception if they're called. This verifies that we are not
# making any calls in the proxier if there is a SpecificServer
# started up.
with patch(
"ray.experimental.internal_kv._internal_kv_put"
) as mock_put, patch(
"ray.experimental.internal_kv._internal_kv_get"
) as mock_get, patch(
"ray.experimental.internal_kv._internal_kv_initialized"
) as mock_initialized:
mock_put.side_effect = Exception("This shouldn't be called!")
mock_get.side_effect = Exception("This shouldn't be called!")
mock_initialized.side_effect = Exception(
"This shouldn't be called!")
make_internal_kv_calls()
else:
make_internal_kv_calls()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[] |
[] |
[
"TIMEOUT_FOR_SPECIFIC_SERVER_S"
] |
[]
|
["TIMEOUT_FOR_SPECIFIC_SERVER_S"]
|
python
| 1 | 0 | |
tests/test_countries_api.py
|
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.17.8
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import, print_function
import os
import time
import unittest
import velo_payments
from velo_payments.api.countries_api import CountriesApi # noqa: E501
from velo_payments.rest import ApiException
from pprint import pprint
# @unittest.skip("skipping")
class TestCountriesApi(unittest.TestCase):
"""CountriesApi unit test stubs"""
def setUp(self):
self.api = velo_payments.api.countries_api.CountriesApi() # noqa: E501
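        # Authenticate only when APITOKEN is the empty string; the token
        # fetched below is cached in os.environ for the test cases to reuse.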
if os.environ.get('APITOKEN') == "":
configuration = velo_payments.Configuration()
# Configure HTTP basic authorization: basicAuth
configuration.username = os.environ.get('KEY')
configuration.password = os.environ.get('SECRET')
            # Defining host is optional and defaults to https://api.sandbox.velopayments.com
configuration.host = os.environ.get('APIURL')
# Create an instance of the API class
api_instance = velo_payments.LoginApi(velo_payments.ApiClient(configuration))
grant_type = 'client_credentials' # str | OAuth grant type. Should use 'client_credentials' (optional) (default to 'client_credentials')
try:
# Authentication endpoint
api_response = api_instance.velo_auth(grant_type=grant_type)
os.environ["APITOKEN"] = api_response.access_token
except ApiException as e:
print("Exception when calling LoginApi->velo_auth: %s\n" % e)
def tearDown(self):
pass
def test_list_supported_countries_v1(self):
"""Test case for list_supported_countries_v1
List Supported Countries # noqa: E501
"""
configuration = velo_payments.Configuration()
configuration.access_token = os.environ["APITOKEN"]
configuration.host = os.environ.get('APIURL')
api_instance = velo_payments.CountriesApi(velo_payments.ApiClient(configuration))
try:
api_response = api_instance.list_supported_countries_v1()
except ApiException as e:
print("Exception when calling CountriesApi->list_supported_countries_v1: %s\n" % e)
def test_list_payment_channel_rules_v1(self):
"""Test case for list_payment_channel_rules_v1
List Payment Channel Country Rules # noqa: E501
"""
configuration = velo_payments.Configuration()
configuration.access_token = os.environ["APITOKEN"]
configuration.host = os.environ.get('APIURL')
api_instance = velo_payments.CountriesApi(velo_payments.ApiClient(configuration))
try:
api_response = api_instance.list_payment_channel_rules_v1()
except ApiException as e:
print("Exception when calling CountriesApi->list_payment_channel_rules_v1: %s\n" % e)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"APITOKEN",
"APIURL",
"KEY",
"SECRET"
] |
[]
|
["APITOKEN", "APIURL", "KEY", "SECRET"]
|
python
| 4 | 0 | |
database.py
|
import copy
import hashlib
import json
import os
import time
class DataBase:
def __init__(self):
self.file_db_init = "db_init.sql"
self.tables = ['go', ]
self.opposite = {
0: 0,
1: 2,
2: 1,
}
# self.sql_type = "PostgreSQL"
self.sql_types = {"SQLite": 0, "PostgreSQL": 1}
# self.sql_type = self.sql_types['PostgreSQL']
# self.sql_type = self.sql_types['SQLite']
if os.environ.get('PORT', '5000') == '5000':
# Local
self.sql_type = self.sql_types['SQLite']
else:
# Remote
self.sql_type = self.sql_types['PostgreSQL']
self.sql_chars = ["?", "%s"]
self.sql_char = self.sql_chars[self.sql_type]
self.connect_init()
def v(self, string: str):
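        # Rewrite generic '%s' placeholders into the active driver's parameter
        # marker: '?' for SQLite, '%s' (unchanged) for PostgreSQL. For example,
        # v("... WHERE code = %s") becomes "... WHERE code = ?" under SQLite.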
return string.replace('%s', self.sql_char)
def make_result(self, code, **args):
result = {
"code": int(code),
"data": args
}
return json.dumps(result)
def connect_init(self):
if self.sql_type == self.sql_types['SQLite']:
import sqlite3 as sql
self.conn = sql.connect('data_sql.db', check_same_thread=False)
else:
import psycopg2 as sql
self.conn = sql.connect(host='ec2-50-17-246-114.compute-1.amazonaws.com',
database='de5kjan1c7bsh8',
user='fhvsqdrzvqgsww',
port='5432',
password='2fe833a144e72ffd656e1adc4ea49ad0571d329ecfa83c51c03c187df0b35152')
def cursor_get(self):
cursor = self.conn.cursor()
return cursor
def cursor_finish(self, cursor):
self.conn.commit()
cursor.close()
def db_init(self):
try:
cursor = self.cursor_get()
for table in self.tables:
try:
cursor.execute("DROP TABLE IF EXISTS %s" % table)
except Exception as e:
print('Error when dropping:', table, '\nException:\n', e)
self.cursor_finish(cursor)
cursor = self.cursor_get()
self.cursor_finish(cursor)
except Exception as e:
print(e)
self.conn.close()
self.connect_init()
cursor = self.cursor_get()
        # Only one statement can be executed at a time, so the script must be
        # split. Oddly, there must be no empty statements in between.
with open(self.file_db_init, encoding='utf8') as f:
string = f.read()
for s in string.split(';'):
try:
if s != '':
cursor.execute(s)
except Exception as e:
print('Error:\n', s, 'Exception:\n', e)
self.cursor_finish(cursor)
def write(self, code: str, player: int, data_str: str, winner: int=0):
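        # Upsert keyed by the game code: insert a fresh row on the first write,
        # otherwise update the existing row's status/data/uptime/winner.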
cursor = self.cursor_get()
cursor.execute(self.v("SELECT code FROM go WHERE code = %s"), (code, ))
data = cursor.fetchall()
if len(data) == 0:
cursor.execute(self.v("INSERT INTO go (code, status, data, uptime, winner) VALUES (%s, %s, %s, %s, %s)"),
(code, self.opposite[player], data_str, int(time.time()), 0))
self.cursor_finish(cursor)
else:
cursor.execute(self.v("UPDATE go SET status = %s, data = %s, uptime = %s, winner = %s WHERE code = %s"),
(self.opposite[player], data_str, int(time.time()), winner, code))
self.cursor_finish(cursor)
return self.make_result(0)
def read(self, code):
cursor = self.cursor_get()
cursor.execute(self.v("SELECT status, data, uptime, winner FROM go WHERE code = %s"), (code, ))
data = cursor.fetchall()
self.cursor_finish(cursor)
if len(data) == 0:
return {
"code": code,
"status": 0,
"data": '',
"uptime": 0,
"winner": 0,
"error": "No such of code."
}
data = data[0]
return {
"code": code,
"status": data[0],
"data": data[1],
"uptime": data[2],
"winner": data[3],
}
def jsonify(string: str):
return json.loads(string)
if __name__ == '__main__':
db = DataBase()
db.db_init()
print(db.opposite[0])
db.write('code', 1, '0000\n0000\n0000\n0000', 0)
print(db.read('code'))
db.write('code', 1, '0000\n0010\n0000\n0000', 0)
print(db.read('code'))
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
chalk/utils.py
|
from __future__ import absolute_import, print_function
from os import linesep
from sys import version_info
from six import python_2_unicode_compatible, string_types
from six.moves import map
# ansi standards http://ascii-table.com/ansi-escape-sequences.php
COLOR_NAMES = (
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
)
COLOR_SET = set(COLOR_NAMES)
COLOR_MAP = dict(zip(COLOR_NAMES, map(str, range(len(COLOR_NAMES)))))
FORMAT_NAMES = ('bold', 'underline', 'blink', 'reverse', 'hide')
FORMAT_VALUES = ('1', '4', '5', '7', '8')
FORMAT_SET = set(FORMAT_NAMES)
FORMAT_MAP = dict(zip(FORMAT_NAMES, FORMAT_VALUES))
ESC = '\x1b[%sm'
RESET = ESC % '0'
@python_2_unicode_compatible
class Style(object):
"""Style is a callable, mutable unicode styling base class.
Usage:
style = Style(1)
bold_text = style('foo') # '\x1b[1mfoo\x1b[0m'
style += Style(32)
        green_bold_text = style('bar') # '\x1b[1;32mbar\x1b[0m'
print(style + 'baz') # prints baz as green bold text
"""
__slots__ = ['value']
def __init__(self, value):
self.value = to_str(value)
def __add__(self, other):
if isinstance(other, string_types):
return str(self) + other
elif not isinstance(other, Style):
raise TypeError(
'concat operator is only supported for string and Style types'
)
return other.__radd__(self)
def __radd__(self, other):
if isinstance(other, string_types):
return other + str(self)
return Style(';'.join([other.value, self.value]))
def __iadd__(self, other):
self.value = self.__add__(other).value
return self
def __str__(self):
return ESC % self.value
def __eq__(self, other):
return other == str(self)
def __ne__(self, other):
return other != str(self)
def __call__(self, txt, reset=True, new_line=False):
txt = to_str(txt)
if reset:
txt += RESET
if new_line:
txt += linesep
return str(self) + txt
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.value)
def clone(self):
"""Replicates the current instance, without being effected by the
modified behaviour of any subclasses.
"""
return Style(self.value)
class FontFormat(Style):
"""chalk.utils.Style subclass
Usage:
style = FontFormat('bold')
bold_text = style('foo') # '\x1b[1mfoo\x1b[0m'
style += FontFormat('underline')
bold_underlined_text = style('bar') # '\x1b[1;4mbar\x1b[0m'
"""
def __init__(self, value):
super(FontFormat, self).__init__(value)
if self.value in FORMAT_SET:
self.value = FORMAT_MAP[self.value]
elif self.value not in FORMAT_VALUES:
raise ValueError(
'FontFormat values should be a member of: {}'.format(
', '.join(FORMAT_NAMES + FORMAT_VALUES)
)
)
class Color(Style):
"""chalk.utils.Style subclass: Base class to facilitate referencing colors
by name.
Usage:
class ForegroundColor(Color):
PREFIX = 3
style = ForegroundColor('red')
red_txt = style('foo') # '\x1b[31mfoo\x1b[0m'
"""
PREFIX = NotImplemented
def __init__(self, value):
super(Color, self).__init__(value)
self.value = self.get_color(self.value)
def get_color(self, value):
"""Helper method to validate and map values used in the instantiation of
of the Color object to the correct unicode value.
"""
if value in COLOR_SET:
value = COLOR_MAP[value]
else:
try:
value = int(value)
if value >= 8:
raise ValueError()
except ValueError as exc:
raise ValueError(
                    'Colors should either be a member of: {} or a positive '
                    'integer below 8'.format(', '.join(COLOR_NAMES))
)
return '{}{}'.format(self.PREFIX, value)
class ForegroundColor(Color):
"""chalk.utils.Color subclass
Usage:
style = ForegroundColor('red')
red_txt = style('foo') # '\x1b[31mfoo\x1b[0m'
"""
PREFIX = 3
class BackgroundColor(Color):
"""chalk.utils.Color subclass
Usage:
style = BackgroundColor('red')
red_txt = style('foo') # '\x1b[41mfoo\x1b[0m'
"""
PREFIX = 4
@python_2_unicode_compatible
class Chalk(object):
"""Instances of the Chalk class serve to leverage the properties of
chalk.utils.Style by exposing itself as a callable interface.
Usage:
white = Chalk('white')
white('foo', bold=True, underline=True)
# returns '\x1b[37;1;4mfoo\x1b[0m'
bold_white = white + FontFormat('bold')
bold_white('foo')
    # returns '\x1b[37;1mfoo\x1b[0m'
bold_white + 'foo'
    # returns '\x1b[37;1mfoo'
"""
__slots__ = ['style']
def __init__(self, foreground_or_style, background=None):
if isinstance(foreground_or_style, Style):
self.style = foreground_or_style
else:
self.style = ForegroundColor(foreground_or_style)
if background is not None:
self.style += BackgroundColor(background)
def __add__(self, other):
if isinstance(other, string_types):
return str(self) + other
elif isinstance(other, (Chalk, Style)):
chalk = self.clone()
chalk += other
return chalk
raise ValueError(
'concat operator is only supported for string and Style types'
)
def __radd__(self, other):
return other.__add__(str(self))
def __iadd__(self, other):
if not isinstance(other, (Style, Chalk)):
raise ValueError(
'concat operator is only supported for string and Style types'
)
self.style += other
return self
def __call__(self, txt, reset=True, new_line=False, **kwargs):
style = self.get_style(**kwargs)
return style(txt, reset=reset, new_line=new_line)
def get_style(self, **kwargs):
"""Helper method to ensure that the instantiated style isn't impacted
by each execution of __call__
"""
style = self.style.clone()
for key, value in kwargs.items():
if value and key in FORMAT_SET:
style += FontFormat(FORMAT_MAP[key])
elif key == 'foreground':
style += ForegroundColor(value)
elif key == 'background':
style += BackgroundColor(value)
return style
def __eq__(self, other):
return other == self.style
def __ne__(self, other):
return other != self.style
def __str__(self):
return str(self.style)
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self)
def clone(self):
return self.__class__(self.style.clone())
def eraser(new_line=False):
"""Equivalent to running bash 'clear' command
"""
output = '\x1b[2J\x1b[0;0H'
if new_line:
output += linesep
return output
PY3 = (version_info >= (3, 0))
def to_str(obj):
"""Attempts to convert given object to a string object
"""
if not isinstance(obj, str) and PY3 and isinstance(obj, bytes):
obj = obj.decode('utf-8')
return obj if isinstance(obj, string_types) else str(obj)
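if __name__ == '__main__':
    # Minimal usage sketch (assumed, not part of the original module): compose
    # a foreground color with font formats and with a background color.
    white = Chalk('white')
    print(white('bold underlined white', bold=True, underline=True))
    green_on_red = Chalk('green', background='red')
    print(green_on_red('green text on a red background'))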
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
lib/test/vot20/stark_st50.py
|
from lib.test.vot20.stark_vot20 import run_vot_exp
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
run_vot_exp('stark_st', 'baseline', vis=False)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
examples/twitter_or_facebook/settings.py
|
# coding=utf-8
import os
from flask import url_for
# DEBUG = True
DEBUG = False
# Create an app and get your own credentials here:
# https://dev.twitter.com/apps
TWITTER_KEY = os.getenv(
'TWITTER_KEY', '<paste your twitter app key here>')
TWITTER_SECRET = os.getenv(
'TWITTER_SECRET', '<paste your twitter app secret here>')
# Create an app and get your own credentials here:
# https://developers.facebook.com/apps
FACEBOOK_APP_ID = os.getenv(
'FACEBOOK_APP_ID', '<paste your facebook app_id here>')
FACEBOOK_APP_SECRET = os.getenv(
'FACEBOOK_APP_SECRET', '<paste your facebook app secret here>')
SQLALCHEMY_URI = os.getenv('DATABASE_URL', 'sqlite:///db.sqlite')
SECRET_KEY = os.getenv('SECRET_KEY', 'development key')
AUTH_SETTINGS = {
'sign_in_redirect': lambda r: url_for('profile'),
'sign_out_redirect': lambda r: url_for('index'),
'password_minlen': 5,
'url_sign_in': lambda r: url_for('sign_in'),
'views': [],
}
try:
from local import * # noqa
except ImportError:
pass
|
[] |
[] |
[
"DATABASE_URL",
"FACEBOOK_APP_SECRET",
"TWITTER_SECRET",
"SECRET_KEY",
"FACEBOOK_APP_ID",
"TWITTER_KEY"
] |
[]
|
["DATABASE_URL", "FACEBOOK_APP_SECRET", "TWITTER_SECRET", "SECRET_KEY", "FACEBOOK_APP_ID", "TWITTER_KEY"]
|
python
| 6 | 0 | |
backport.py
|
#!/usr/bin/env python
from __future__ import annotations
import argparse
import contextlib
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from typing import Any, Callable, Iterator, Optional, Tuple, TYPE_CHECKING
import github
logger = logging.getLogger(__name__)
ExitCode = int
if TYPE_CHECKING:
# Support Python 3.7. Use typing_extensions because mypy installs it.
# `try: from typing import Literal` causes:
# error: Module 'typing' has no attribute 'Literal' [attr-defined]
from typing_extensions import Literal
TempdirDeleteOption = Literal[True, False, 'on-success']
class GracefulError(Exception):
pass
class NoActionRequiredError(GracefulError):
pass
class GitCommandError(Exception):
def __init__(self, msg: str, cmd: list[str]):
super(GitCommandError, self).__init__(msg)
self.cmd = cmd
def __str__(self) -> str:
return "{}\nCommand: {}".format(
super(GitCommandError, self).__str__(),
str(self.cmd))
@contextlib.contextmanager
def tempdir(
delete: TempdirDeleteOption = True, **kwargs: Any) -> Iterator[str]:
assert delete in (True, False, 'on-success')
temp_dir = tempfile.mkdtemp(**kwargs)
succeeded = False
try:
yield temp_dir
succeeded = True
except Exception:
raise
finally:
if delete is True or (delete == 'on-success' and succeeded):
shutil.rmtree(temp_dir, ignore_errors=True)
@contextlib.contextmanager
def git_work_dir(use_cwd: bool, **tempdir_kwargs: Any) -> Iterator[str]:
if use_cwd:
yield os.getcwd()
else:
with tempdir(**tempdir_kwargs) as tempd:
yield os.path.join(tempd, 'work')
def git(args: list[str], cd: Optional[str] = None) -> None:
cmd = ['git']
if cd is not None:
assert os.path.isdir(cd)
cmd += ['-C', cd]
cmd += list(args)
print('**GIT** {}'.format(' '.join(cmd)))
proc = subprocess.run(cmd)
if proc.returncode != 0:
raise GitCommandError(
"Git command failed with code {}".format(proc.returncode),
cmd)
print('')
class App(object):
def __init__(
self, token: str, organ_name: str, repo_name: str,
debug: bool = False):
assert isinstance(organ_name, str)
assert isinstance(repo_name, str)
self.repo_name = repo_name
self.organ_name = organ_name
self.g = github.Github(token)
self.repo = self.g.get_repo('{}/{}'.format(organ_name, repo_name))
self.user_name = self.g.get_user().login
self.debug = debug
def run_cli(self, **kwargs: Any) -> ExitCode:
try:
self._run(**kwargs)
except NoActionRequiredError as e:
sys.stderr.write('No action required: {}\n'.format(e))
except GracefulError as e:
sys.stderr.write('Error: {}\n'.format(e))
return 1
return 0
def run_bot(self, *, pr_num: int, **kwargs: Any) -> ExitCode:
try:
self._run(pr_num=pr_num, **kwargs)
except NoActionRequiredError as e:
sys.stderr.write('No action required: {}\n'.format(e))
except Exception as e:
sys.stderr.write('Backport failed: {}\n'.format(e))
pr = self.repo.get_pull(pr_num)
mention = 'cupy/code-owners'
if pr.is_merged():
merged_by = pr.merged_by.login
if not merged_by.endswith('[bot]'):
mention = merged_by
elif pr.assignee is not None:
# For PRs merged by bots (Mergify), mention assignee.
mention = pr.assignee.login
pr.create_issue_comment(f'''\
@{mention} Failed to backport automatically.
----
```
{e}
```
''')
return 0
def _run(self, *, pr_num: Optional[int], sha: Optional[str],
target_branch: str, is_continue: bool,
abort_before_push: bool, https: bool) -> None:
assert isinstance(pr_num, int) and pr_num >= 1 or pr_num is None
assert (pr_num is None and sha is not None) or (
pr_num is not None and sha is None
)
assert isinstance(target_branch, str)
assert isinstance(is_continue, bool)
assert isinstance(abort_before_push, bool)
assert isinstance(https, bool)
# Get information of the original pull request
if sha is not None:
pr_num, branch_name, _ = self.parse_log_message(sha)
assert pr_num is not None
pr = self.repo.get_pull(pr_num)
if not pr.merged:
raise GracefulError('PR #{} is not merged'.format(pr_num))
merge_commit_sha = pr.merge_commit_sha
_, branch_name, _ = self.parse_log_message(merge_commit_sha)
title = pr.title
pr_issue = self.repo.get_issue(pr_num)
labels = set(label.name for label in pr_issue.labels)
if 'to-be-backported' not in labels:
raise NoActionRequiredError(
'PR #{} doesn\'t have \'to-be-backported\' label.'.format(
pr_num))
labels.remove('to-be-backported')
labels.discard('reviewer-team')
labels = set(_ for _ in labels if not _.startswith('st:'))
organ_name = self.organ_name
user_name = self.user_name
repo_name = self.repo_name
if https:
uri_template = 'https://github.com/{}/{}'
else:
uri_template = '[email protected]:{}/{}'
origin_remote = uri_template.format(organ_name, repo_name)
user_remote = uri_template.format(user_name, repo_name)
bp_branch_name = 'bp-{}-{}-{}'.format(pr_num,
target_branch, branch_name)
delete: TempdirDeleteOption
if self.debug or abort_before_push:
delete = False
else:
delete = 'on-success'
with git_work_dir(
use_cwd=is_continue, prefix='bp-', delete=delete) as workd:
assert workd is not None
print(workd)
def git_(cmd: list[str]) -> None:
return git(cmd, cd=workd)
manual_steps = (
'Working tree is saved at: {workd}\n\n'
'Follow these steps:\n\n'
' 1. Go to the working tree:\n\n'
' cd {workd}\n\n'
' 2. Manually resolve the conflict.\n\n'
' 3. Continue cherry-pick.\n\n'
' git cherry-pick --continue\n\n'
' 4. Run the backport script with the --continue option.\n\n'
' {backport} --continue\n\n\n').format(
workd=workd,
backport=' '.join([shlex.quote(v) for v in sys.argv]))
if not is_continue:
# Clone target repo
git(['clone', '--branch', target_branch, origin_remote, workd])
# Create backport branch
git_(['checkout', '-b', bp_branch_name])
git_(['fetch', 'origin', merge_commit_sha])
try:
git_(['cherry-pick', '-m1', merge_commit_sha])
except GitCommandError:
sys.stderr.write(
'Cherry-pick failed.\n{}'.format(manual_steps))
raise GracefulError('Not cleanly cherry-picked')
if abort_before_push:
sys.stderr.write(
'Backport procedure has been aborted due to'
' configuration.\n{}'.format(manual_steps))
raise GracefulError('Aborted')
# Push to user remote
git_(['push', user_remote, 'HEAD'])
# Create backport pull request
print("Creating a pull request.")
bp_pr = self.repo.create_pull(
title='[backport] {}'.format(title),
head='{}:{}'.format(self.user_name, bp_branch_name),
base=target_branch,
body='Backport of #{} by @{}'.format(pr_num, pr.user.login))
bp_pr_issue = self.repo.get_issue(bp_pr.number)
bp_pr_issue.set_labels('backport', *list(labels))
bp_pr_issue.create_comment(
'[automatic post] Jenkins, test this please.')
print("Done.")
print(bp_pr.html_url)
def parse_log_message(self, commit: str) -> Tuple[int, str, str]:
msg = self.repo.get_commit(commit).commit.message
head_msg, _, title = msg.split('\n')[:3]
pattern = r'^Merge pull request #(?P<pr_num>[0-9]+) from [^ /]+/(?P<branch_name>[^ ]+)$' # NOQA
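        # A message like "Merge pull request #1234 from someuser/fix-typo"
        # (hypothetical values) yields pr_num=1234 and branch_name='fix-typo'.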
m = re.match(pattern, head_msg)
if m is None:
raise GracefulError('Invalid log message: {}'.format(head_msg))
pr_num = int(m.group('pr_num'))
branch_name = m.group('branch_name')
return pr_num, branch_name, title
def main(args_: list[str]) -> ExitCode:
parser = argparse.ArgumentParser()
parser.add_argument(
'--repo', required=True,
choices=('chainer', 'cupy', 'cupy-release-tools'),
help='target repository')
parser.add_argument(
'--token', type=str, default=None,
help='GitHub access token.')
parser.add_argument(
'--pr', default=None, type=int,
help='The original PR number to be backported. Exclusive with --sha')
parser.add_argument(
'--sha', default=None, type=str,
help='The SHA hash of the merge commit. Exclusive with --pr')
parser.add_argument(
'--branch', type=str, default='v9',
help='Target branch to make a backport')
parser.add_argument(
'--https', action='store_true', default=False,
help='Use HTTPS instead of SSH for git access')
parser.add_argument(
'--debug', action='store_true')
parser.add_argument(
'--continue', action='store_true', dest='is_continue',
help='Continues the process suspended by conflict situation. Run from'
' the working tree directory.')
parser.add_argument(
'--abort-before-push', action='store_true',
        help='Abort the procedure before making a push. Useful if you want to'
        ' make some modifications to the backport branch. Use --continue to'
        ' make an actual push after making modifications.')
parser.add_argument(
'--bot', action='store_true', default=False,
help='Leave a comment when backport failed. This is intended for use'
' with GitHub workflow.')
args = parser.parse_args(args_)
target_branch = args.branch
if args.repo == 'chainer':
organ_name, repo_name = 'chainer', 'chainer'
elif args.repo == 'cupy':
organ_name, repo_name = 'cupy', 'cupy'
elif args.repo == 'cupy-release-tools':
organ_name, repo_name = 'cupy', 'cupy-release-tools'
else:
assert False
if args.pr is None and args.sha is None:
parser.error('Specify only --pr or --sha')
if args.pr is not None and args.sha is not None:
parser.error('Can\'t specify both --pr and --sha')
github_token = args.token
if github_token is None:
if 'BACKPORT_GITHUB_TOKEN' not in os.environ:
parser.error('GitHub Access token must be specified with '
'--token or BACKPORT_GITHUB_TOKEN '
'environment variable.')
github_token = os.environ['BACKPORT_GITHUB_TOKEN']
if args.debug:
github.enable_console_debug_logging()
app = App(
github_token,
organ_name=organ_name,
repo_name=repo_name)
run_func: Callable[..., ExitCode] = app.run_cli
if args.bot:
print('Running as bot mode (will leave a comment when failed).')
run_func = app.run_bot
return run_func(
pr_num=args.pr,
sha=args.sha,
target_branch=target_branch,
is_continue=args.is_continue,
abort_before_push=args.abort_before_push,
https=args.https)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv[1:]))
|
[] |
[] |
[
"BACKPORT_GITHUB_TOKEN"
] |
[]
|
["BACKPORT_GITHUB_TOKEN"]
|
python
| 1 | 0 | |
pkg/security/module/module.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build linux
// +build linux
package module
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/signal"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
sapi "github.com/DataDog/datadog-agent/pkg/security/api"
sconfig "github.com/DataDog/datadog-agent/pkg/security/config"
skernel "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel"
seclog "github.com/DataDog/datadog-agent/pkg/security/log"
"github.com/DataDog/datadog-agent/pkg/security/metrics"
sprobe "github.com/DataDog/datadog-agent/pkg/security/probe"
"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
"github.com/DataDog/datadog-agent/pkg/security/secl/model"
"github.com/DataDog/datadog-agent/pkg/security/secl/rules"
"github.com/DataDog/datadog-agent/pkg/util/kernel"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/version"
"github.com/DataDog/datadog-go/statsd"
)
// Module represents the system-probe module for the runtime security agent
type Module struct {
sync.RWMutex
wg sync.WaitGroup
probe *sprobe.Probe
config *sconfig.Config
ruleSets [2]*rules.RuleSet
currentRuleSet uint64
reloading uint64
statsdClient *statsd.Client
apiServer *APIServer
grpcServer *grpc.Server
listener net.Listener
rateLimiter *RateLimiter
sigupChan chan os.Signal
ctx context.Context
cancelFnc context.CancelFunc
rulesLoaded func(rs *rules.RuleSet)
policiesVersions []string
selfTester *SelfTester
}
// Register the runtime security agent module
func (m *Module) Register(_ *module.Router) error {
if err := m.Init(); err != nil {
return err
}
return m.Start()
}
func (m *Module) sanityChecks() error {
// make sure debugfs is mounted
	if mounted, err := kernel.IsDebugFSMounted(); !mounted {
		// err may be nil even when debugfs is absent; return an explicit
		// error in that case so the sanity check cannot silently pass.
		if err != nil {
			return err
		}
		return errors.New("debugfs is not mounted")
	}
version, err := skernel.NewKernelVersion()
if err != nil {
return err
}
if version.Code >= skernel.Kernel5_13 && kernel.GetLockdownMode() == kernel.Confidentiality {
return errors.New("eBPF not supported in lockdown `confidentiality` mode")
}
isWriteUserNotSupported := version.Code >= skernel.Kernel5_13 && kernel.GetLockdownMode() == kernel.Integrity
if m.config.ERPCDentryResolutionEnabled && isWriteUserNotSupported {
log.Warn("eRPC path resolution is not supported in lockdown `integrity` mode")
m.config.ERPCDentryResolutionEnabled = false
}
return nil
}
// Init initializes the module
func (m *Module) Init() error {
if err := m.sanityChecks(); err != nil {
return err
}
	// force removal of a previous socket that was not cleaned up
os.Remove(m.config.SocketPath)
ln, err := net.Listen("unix", m.config.SocketPath)
if err != nil {
return errors.Wrap(err, "unable to register security runtime module")
}
if err := os.Chmod(m.config.SocketPath, 0700); err != nil {
return errors.Wrap(err, "unable to register security runtime module")
}
m.listener = ln
m.wg.Add(1)
go func() {
defer m.wg.Done()
if err := m.grpcServer.Serve(ln); err != nil {
log.Error(err)
}
}()
// start api server
m.apiServer.Start(m.ctx)
m.probe.SetEventHandler(m)
// initialize the eBPF manager and load the programs and maps in the kernel. At this stage, the probes are not
// running yet.
if err := m.probe.Init(m.statsdClient); err != nil {
return errors.Wrap(err, "failed to init probe")
}
return nil
}
// Start the module
func (m *Module) Start() error {
// start the manager and its probes / perf maps
if err := m.probe.Start(); err != nil {
return errors.Wrap(err, "failed to start probe")
}
if err := m.Reload(); err != nil {
return err
}
// fetch the current state of the system (example: mount points, running processes, ...) so that our user space
// context is ready when we start the probes
if err := m.probe.Snapshot(); err != nil {
return err
}
m.wg.Add(1)
go m.metricsSender()
signal.Notify(m.sigupChan, syscall.SIGHUP)
m.wg.Add(1)
go func() {
defer m.wg.Done()
for range m.sigupChan {
log.Info("Reload configuration")
if err := m.Reload(); err != nil {
log.Errorf("failed to reload configuration: %s", err)
}
}
}()
return nil
}
func (m *Module) displayReport(report *sprobe.Report) {
content, _ := json.Marshal(report)
log.Debugf("Policy report: %s", content)
}
func (m *Module) getEventTypeEnabled() map[eval.EventType]bool {
enabled := make(map[eval.EventType]bool)
categories := model.GetEventTypePerCategory()
if m.config.FIMEnabled {
if eventTypes, exists := categories[model.FIMCategory]; exists {
for _, eventType := range eventTypes {
enabled[eventType] = true
}
}
}
if m.config.RuntimeEnabled {
// everything but FIM
for _, category := range model.GetAllCategories() {
if category == model.FIMCategory {
continue
}
if eventTypes, exists := categories[category]; exists {
for _, eventType := range eventTypes {
enabled[eventType] = true
}
}
}
}
return enabled
}
func logMultiErrors(msg string, m *multierror.Error) {
var errorLevel bool
for _, err := range m.Errors {
if rErr, ok := err.(*rules.ErrRuleLoad); ok {
if !errors.Is(rErr.Err, rules.ErrEventTypeNotEnabled) {
errorLevel = true
}
}
}
if errorLevel {
log.Errorf(msg, m.Error())
} else {
log.Warnf(msg, m.Error())
}
}
func getPoliciesVersions(rs *rules.RuleSet) []string {
var versions []string
cache := make(map[string]bool)
for _, rule := range rs.GetRules() {
version := rule.Definition.Policy.Version
if _, exists := cache[version]; !exists {
cache[version] = true
versions = append(versions, version)
}
}
return versions
}
// Reload the rule set
func (m *Module) Reload() error {
m.Lock()
defer m.Unlock()
atomic.StoreUint64(&m.reloading, 1)
defer atomic.StoreUint64(&m.reloading, 0)
policiesDir := m.config.PoliciesDir
rsa := sprobe.NewRuleSetApplier(m.config, m.probe)
var opts rules.Opts
opts.
WithConstants(model.SECLConstants).
WithVariables(sprobe.SECLVariables).
WithSupportedDiscarders(sprobe.SupportedDiscarders).
WithEventTypeEnabled(m.getEventTypeEnabled()).
WithReservedRuleIDs(sprobe.AllCustomRuleIDs()).
WithLegacyFields(model.SECLLegacyFields).
WithLogger(&seclog.PatternLogger{})
model := &model.Model{}
approverRuleSet := rules.NewRuleSet(model, model.NewEvent, &opts)
loadApproversErr := rules.LoadPolicies(policiesDir, approverRuleSet)
ruleSet := m.probe.NewRuleSet(&opts)
loadErr := rules.LoadPolicies(policiesDir, ruleSet)
if loadErr.ErrorOrNil() != nil {
logMultiErrors("error while loading policies: %+v", loadErr)
} else if loadApproversErr.ErrorOrNil() != nil {
logMultiErrors("error while loading policies for Approvers: %+v", loadApproversErr)
}
monitor := m.probe.GetMonitor()
ruleSetLoadedReport := monitor.PrepareRuleSetLoadedReport(ruleSet, loadErr)
if m.selfTester != nil {
if err := m.selfTester.CreateTargetFileIfNeeded(); err != nil {
log.Errorf("failed to create self-test target file: %+v", err)
}
m.selfTester.AddSelfTestRulesToRuleSets(ruleSet, approverRuleSet)
}
approvers, err := approverRuleSet.GetApprovers(sprobe.GetCapababilities())
if err != nil {
return err
}
m.policiesVersions = getPoliciesVersions(ruleSet)
ruleSet.AddListener(m)
if m.rulesLoaded != nil {
m.rulesLoaded(ruleSet)
}
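	// Double-buffer the rule sets: write the new set into the currently
	// inactive slot, then atomically flip the index so readers switch over
	// without locking.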
currentRuleSet := 1 - atomic.LoadUint64(&m.currentRuleSet)
m.ruleSets[currentRuleSet] = ruleSet
atomic.StoreUint64(&m.currentRuleSet, currentRuleSet)
// analyze the ruleset, push default policies in the kernel and generate the policy report
report, err := rsa.Apply(ruleSet, approvers)
if err != nil {
return err
}
// full list of IDs, user rules + custom
var ruleIDs []rules.RuleID
ruleIDs = append(ruleIDs, ruleSet.ListRuleIDs()...)
ruleIDs = append(ruleIDs, sprobe.AllCustomRuleIDs()...)
m.apiServer.Apply(ruleIDs)
m.rateLimiter.Apply(ruleIDs)
m.displayReport(report)
// report that a new policy was loaded
monitor.ReportRuleSetLoaded(ruleSetLoadedReport)
return nil
}
// Close the module
func (m *Module) Close() {
close(m.sigupChan)
m.cancelFnc()
if m.grpcServer != nil {
m.grpcServer.Stop()
}
if m.listener != nil {
m.listener.Close()
os.Remove(m.config.SocketPath)
}
if m.selfTester != nil {
_ = m.selfTester.Cleanup()
}
m.probe.Close()
m.wg.Wait()
}
// EventDiscarderFound is called by the ruleset when a new discarder is discovered
func (m *Module) EventDiscarderFound(rs *rules.RuleSet, event eval.Event, field eval.Field, eventType eval.EventType) {
if atomic.LoadUint64(&m.reloading) == 1 {
return
}
if err := m.probe.OnNewDiscarder(rs, event.(*sprobe.Event), field, eventType); err != nil {
seclog.Trace(err)
}
}
// HandleEvent is called by the probe when an event arrives from the kernel
func (m *Module) HandleEvent(event *sprobe.Event) {
if ruleSet := m.GetRuleSet(); ruleSet != nil {
ruleSet.Evaluate(event)
}
}
// HandleCustomEvent is called by the probe when an event should be sent to Datadog but doesn't need evaluation
func (m *Module) HandleCustomEvent(rule *rules.Rule, event *sprobe.CustomEvent) {
m.SendEvent(rule, event, func() []string { return nil }, "")
}
// RuleMatch is called by the ruleset when a rule matches
func (m *Module) RuleMatch(rule *rules.Rule, event eval.Event) {
// prepare the event
m.probe.OnRuleMatch(rule, event.(*sprobe.Event))
// the service tag and container ID must be resolved here, outside of the
// callback, because they rely on the process tree, which can be modified
// while the event is queued
service := event.(*sprobe.Event).GetProcessServiceTag()
id := event.(*sprobe.Event).ContainerContext.ID
extTagsCb := func() []string {
var tags []string
// check from tagger
if service == "" {
service = m.probe.GetResolvers().TagsResolver.GetValue(id, "service")
}
if service == "" {
service = m.config.HostServiceName
}
return append(tags, m.probe.GetResolvers().TagsResolver.Resolve(id)...)
}
if m.selfTester != nil {
m.selfTester.SendEventIfExpecting(rule, event)
}
m.SendEvent(rule, event, extTagsCb, service)
}
// SendEvent sends an event to the backend after checking that the rate limiter allows it for the provided rule
func (m *Module) SendEvent(rule *rules.Rule, event Event, extTagsCb func() []string, service string) {
if m.rateLimiter.Allow(rule.ID) {
m.apiServer.SendEvent(rule, event, extTagsCb, service)
} else {
seclog.Tracef("Event on rule %s was dropped due to rate limiting", rule.ID)
}
}
func (m *Module) metricsSender() {
defer m.wg.Done()
statsTicker := time.NewTicker(m.config.StatsPollingInterval)
defer statsTicker.Stop()
heartbeatTicker := time.NewTicker(15 * time.Second)
defer heartbeatTicker.Stop()
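// heartbeat gauges are emitted on a fixed 15s cadence, independent of the
// configurable stats polling interval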
for {
select {
case <-statsTicker.C:
if os.Getenv("RUNTIME_SECURITY_TESTSUITE") == "true" {
continue
}
if err := m.probe.SendStats(); err != nil {
log.Debug(err)
}
if err := m.rateLimiter.SendStats(); err != nil {
log.Debug(err)
}
if err := m.apiServer.SendStats(); err != nil {
log.Debug(err)
}
case <-heartbeatTicker.C:
tags := []string{fmt.Sprintf("version:%s", version.AgentVersion)}
m.RLock()
for _, version := range m.policiesVersions {
tags = append(tags, fmt.Sprintf("policies_version:%s", version))
}
m.RUnlock()
if m.config.RuntimeEnabled {
_ = m.statsdClient.Gauge(metrics.MetricSecurityAgentRuntimeRunning, 1, tags, 1)
} else if m.config.FIMEnabled {
_ = m.statsdClient.Gauge(metrics.MetricSecurityAgentFIMRunning, 1, tags, 1)
}
case <-m.ctx.Done():
return
}
}
}
// GetStats returns statistics about the module
func (m *Module) GetStats() map[string]interface{} {
debug := map[string]interface{}{}
if m.probe != nil {
debug["probe"] = m.probe.GetDebugStats()
} else {
debug["probe"] = "not_running"
}
return debug
}
// GetProbe returns the module's probe
func (m *Module) GetProbe() *sprobe.Probe {
return m.probe
}
// GetRuleSet returns the set of loaded rules
func (m *Module) GetRuleSet() (rs *rules.RuleSet) {
return m.ruleSets[atomic.LoadUint64(&m.currentRuleSet)]
}
// SetRulesetLoadedCallback registers a callback that is invoked when a rule set is loaded
func (m *Module) SetRulesetLoadedCallback(cb func(rs *rules.RuleSet)) {
m.rulesLoaded = cb
}
// NewModule instantiates a runtime security system-probe module
func NewModule(cfg *sconfig.Config) (module.Module, error) {
var statsdClient *statsd.Client
var err error
if cfg != nil {
statsdAddr := os.Getenv("STATSD_URL")
if statsdAddr == "" {
statsdAddr = cfg.StatsdAddr
}
if statsdClient, err = statsd.New(statsdAddr); err != nil {
return nil, err
}
} else {
log.Warn("metrics won't be sent to Datadog")
}
probe, err := sprobe.NewProbe(cfg, statsdClient)
if err != nil {
return nil, err
}
ctx, cancelFnc := context.WithCancel(context.Background())
// custom limiters
limits := make(map[rules.RuleID]Limit)
var selfTester *SelfTester
if cfg.SelfTestEnabled {
selfTester = NewSelfTester()
}
m := &Module{
config: cfg,
probe: probe,
statsdClient: statsdClient,
apiServer: NewAPIServer(cfg, probe, statsdClient),
grpcServer: grpc.NewServer(),
rateLimiter: NewRateLimiter(statsdClient, LimiterOpts{Limits: limits}),
sigupChan: make(chan os.Signal, 1),
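// start on slot 1 so that the first Reload() populates and swaps to slot 0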
currentRuleSet: 1,
ctx: ctx,
cancelFnc: cancelFnc,
selfTester: selfTester,
}
m.apiServer.module = m
seclog.SetPatterns(cfg.LogPatterns)
sapi.RegisterSecurityModuleServer(m.grpcServer, m.apiServer)
return m, nil
}
|
[
"\"RUNTIME_SECURITY_TESTSUITE\"",
"\"STATSD_URL\""
] |
[] |
[
"RUNTIME_SECURITY_TESTSUITE",
"STATSD_URL"
] |
[]
|
["RUNTIME_SECURITY_TESTSUITE", "STATSD_URL"]
|
go
| 2 | 0 | |
tests/celery_test.py
|
import os
from pathlib import Path
from celery import Celery
from app.motivator.motivator_bot.telegram_bot import motivator
celery_test_app_name = Path(__file__).stem
app = Celery(celery_test_app_name, broker='pyamqp://guest@localhost//')
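# the broker URL assumes a local RabbitMQ with the default guest account and vhost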
@app.task
def sup():
"""
Quick-and-dirty helper used only by tests; sends a fixed message to the chat given by SAB_ID.
"""
return motivator.bot.send_message(chat_id=os.environ.get('SAB_ID'), text="HEY")
app.conf.beat_schedule = {
"see-you-in-ten-seconds-task": {
"task": f"tests.{celery_test_app_name}.sup",
"schedule": 5.0,
}
}
# test script
# if __name__ == '__main__':
# import sys
# with CeleryFacadeTester():
# print('SLEEP')
# time.sleep(30)
# print('WAKE UP TO KILL CELERY')
|
[] |
[] |
[
"SAB_ID"
] |
[]
|
["SAB_ID"]
|
python
| 1 | 0 | |
tests/query_test/test_nested_types.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from subprocess import check_call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfOldAggsJoins, SkipIfIsilon, SkipIfS3, SkipIfLocal
from tests.util.filesystem_utils import WAREHOUSE, get_fs_path
@SkipIfOldAggsJoins.nested_types
class TestNestedTypes(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestNestedTypes, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'parquet')
def test_scanner_basic(self, vector):
"""Queries that do not materialize arrays."""
self.run_test_case('QueryTest/nested-types-scanner-basic', vector)
def test_scanner_array_materialization(self, vector):
"""Queries that materialize arrays."""
self.run_test_case('QueryTest/nested-types-scanner-array-materialization', vector)
def test_scanner_multiple_materialization(self, vector):
"""Queries that materialize the same array multiple times."""
self.run_test_case('QueryTest/nested-types-scanner-multiple-materialization', vector)
def test_scanner_position(self, vector):
"""Queries that materialize the artifical position element."""
self.run_test_case('QueryTest/nested-types-scanner-position', vector)
def test_scanner_map(self, vector):
"""Queries that materialize maps. (Maps looks like arrays of key/value structs, so
most map functionality is already tested by the array tests.)"""
self.run_test_case('QueryTest/nested-types-scanner-maps', vector)
def test_runtime(self, vector):
"""Queries that send collections through the execution runtime."""
self.run_test_case('QueryTest/nested-types-runtime', vector)
def test_subplan(self, vector):
"""Test subplans with various exec nodes inside it."""
self.run_test_case('QueryTest/nested-types-subplan', vector)
def test_with_clause(self, vector):
"""Queries using nested types and with WITH clause."""
self.run_test_case('QueryTest/nested-types-with-clause', vector)
@SkipIfLocal.mem_usage_different
def test_tpch(self, vector):
"""Queries over the larger nested TPCH dataset."""
self.run_test_case('QueryTest/nested-types-tpch', vector)
def test_parquet_stats(self, vector):
"""Queries that test evaluation of Parquet row group statistics."""
# The test makes assumptions about the number of row groups that are processed and
# skipped inside a fragment, so we ensure that the tests run in a single fragment.
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/nested-types-parquet-stats', vector)
@SkipIfOldAggsJoins.nested_types
class TestParquetArrayEncodings(ImpalaTestSuite):
TESTFILE_DIR = os.path.join(os.environ['IMPALA_HOME'],
"testdata/parquet_nested_types_encodings")
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquetArrayEncodings, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'parquet')
# $ parquet-tools schema SingleFieldGroupInList.parquet
# message SingleFieldGroupInList {
# optional group single_element_groups (LIST) {
# repeated group single_element_group {
# required int64 count;
# }
# }
# }
#
# $ parquet-tools cat SingleFieldGroupInList.parquet
# single_element_groups:
# .single_element_group:
# ..count = 1234
# .single_element_group:
# ..count = 2345
def test_single_field_group_in_list(self, vector, unique_database):
tablename = "SingleFieldGroupInList"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename, "SingleFieldGroupInList.parquet",
"col1 array<struct<count: bigint>>")
result = self.client.execute("select item.count from %s.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute("select item.count from %s t, t.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['2']
# $ parquet-tools schema AvroPrimitiveInList.parquet
# message AvroPrimitiveInList {
# required group list_of_ints (LIST) {
# repeated int32 array;
# }
# }
#
# $ parquet-tools cat AvroPrimitiveInList.parquet
# list_of_ints:
# .array = 34
# .array = 35
# .array = 36
def test_avro_primitive_in_list(self, vector, unique_database):
tablename = "AvroPrimitiveInList"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename, "AvroPrimitiveInList.parquet",
"col1 array<int>")
result = self.client.execute("select item from %s.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute("select item from %s t, t.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['3']
# $ parquet-tools schema AvroSingleFieldGroupInList.parquet
# message AvroSingleFieldGroupInList {
# optional group single_element_groups (LIST) {
# repeated group array {
# required int64 count;
# }
# }
# }
#
# $ parquet-tools cat AvroSingleFieldGroupInList.parquet
# single_element_groups:
# .array:
# ..count = 1234
# .array:
# ..count = 2345
def test_avro_single_field_group_in_list(self, vector, unique_database):
tablename = "AvroSingleFieldGroupInList"
full_name = "%s.%s" % (unique_database, tablename)
# Note that the field name does not match the field name in the file schema.
self._create_test_table(unique_database, tablename,
"AvroSingleFieldGroupInList.parquet", "col1 array<struct<f1: bigint>>")
result = self.client.execute("select item.f1 from %s.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute("select item.f1 from %s t, t.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['2']
# $ parquet-tools schema bad-avro.parquet
# message org.apache.spark.sql.execution.datasources.parquet.test.avro.AvroArrayOfArray {
# required group int_arrays_column (LIST) {
# repeated group array (LIST) {
# repeated int32 array;
# }
# }
# }
#
# $ parquet-tools cat bad-avro.parquet
# int_arrays_column:
# .array:
# ..array = 0
# ..array = 1
# ..array = 2
# .array:
# ..array = 3
# ..array = 4
# ..array = 5
# .array:
# ..array = 6
# ..array = 7
# ..array = 8
#
# int_arrays_column:
# .array:
# ..array = 0
# ..array = 1
# ..array = 2
# .array:
# ..array = 3
# ..array = 4
# ..array = 5
# .array:
# ..array = 6
# ..array = 7
# ..array = 8
#
# [Same int_arrays_column repeated 8x more]
def test_avro_array_of_arrays(self, vector, unique_database):
tablename = "AvroArrayOfArrays"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename, "bad-avro.parquet",
"col1 array<array<int>>")
result = self.client.execute("select item from %s.col1.item" % full_name)
assert len(result.data) == 9 * 10
assert result.data == ['0', '1', '2', '3', '4', '5', '6', '7', '8'] * 10
result = self.client.execute(
"select a2.item from %s t, t.col1 a1, a1.item a2" % full_name)
assert len(result.data) == 9 * 10
assert result.data == ['0', '1', '2', '3', '4', '5', '6', '7', '8'] * 10
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 10
assert result.data == ['3'] * 10
result = self.client.execute(
"select cnt from %s t, t.col1 a1, (select count(*) cnt from a1.item) v" % full_name)
assert len(result.data) == 3 * 10
assert result.data == ['3', '3', '3'] * 10
# $ parquet-tools schema ThriftPrimitiveInList.parquet
# message ThriftPrimitiveInList {
# required group list_of_ints (LIST) {
# repeated int32 list_of_ints_tuple;
# }
# }
#
# $ parquet-tools cat ThriftPrimitiveInList.parquet
# list_of_ints:
# .list_of_ints_tuple = 34
# .list_of_ints_tuple = 35
# .list_of_ints_tuple = 36
def test_thrift_primitive_in_list(self, vector, unique_database):
tablename = "ThriftPrimitiveInList"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename,
"ThriftPrimitiveInList.parquet", "col1 array<int>")
result = self.client.execute("select item from %s.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute("select item from %s t, t.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['3']
# $ parquet-tools schema ThriftSingleFieldGroupInList.parquet
# message ThriftSingleFieldGroupInList {
# optional group single_element_groups (LIST) {
# repeated group single_element_groups_tuple {
# required int64 count;
# }
# }
# }
#
# $ parquet-tools cat ThriftSingleFieldGroupInList.parquet
# single_element_groups:
# .single_element_groups_tuple:
# ..count = 1234
# .single_element_groups_tuple:
# ..count = 2345
def test_thrift_single_field_group_in_list(self, vector, unique_database):
tablename = "ThriftSingleFieldGroupInList"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename,
"ThriftSingleFieldGroupInList.parquet", "col1 array<struct<f1: bigint>>")
result = self.client.execute("select item.f1 from %s.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute("select item.f1 from %s t, t.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1234', '2345']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['2']
# $ parquet-tools schema bad-thrift.parquet
# message ParquetSchema {
# required group intListsColumn (LIST) {
# repeated group intListsColumn_tuple (LIST) {
# repeated int32 intListsColumn_tuple_tuple;
# }
# }
# }
#
# $ parquet-tools cat bad-thrift.parquet
# intListsColumn:
# .intListsColumn_tuple:
# ..intListsColumn_tuple_tuple = 0
# ..intListsColumn_tuple_tuple = 1
# ..intListsColumn_tuple_tuple = 2
# .intListsColumn_tuple:
# ..intListsColumn_tuple_tuple = 3
# ..intListsColumn_tuple_tuple = 4
# ..intListsColumn_tuple_tuple = 5
# .intListsColumn_tuple:
# ..intListsColumn_tuple_tuple = 6
# ..intListsColumn_tuple_tuple = 7
# ..intListsColumn_tuple_tuple = 8
def test_thrift_array_of_arrays(self, vector, unique_database):
tablename = "ThriftArrayOfArrays"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename, "bad-thrift.parquet",
"col1 array<array<int>>")
result = self.client.execute("select item from %s.col1.item" % full_name)
assert len(result.data) == 9
assert result.data == ['0', '1', '2', '3', '4', '5', '6', '7', '8']
result = self.client.execute(
"select a2.item from %s t, t.col1 a1, a1.item a2" % full_name)
assert len(result.data) == 9
assert result.data == ['0', '1', '2', '3', '4', '5', '6', '7', '8']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['3']
result = self.client.execute(
"select cnt from %s t, t.col1 a1, (select count(*) cnt from a1.item) v" % full_name)
assert len(result.data) == 3
assert result.data == ['3', '3', '3']
# $ parquet-tools schema UnannotatedListOfPrimitives.parquet
# message UnannotatedListOfPrimitives {
# repeated int32 list_of_ints;
# }
#
# $ parquet-tools cat UnannotatedListOfPrimitives.parquet
# list_of_ints = 34
# list_of_ints = 35
# list_of_ints = 36
def test_unannotated_list_of_primitives(self, vector, unique_database):
tablename = "UnannotatedListOfPrimitives"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename,
"UnannotatedListOfPrimitives.parquet", "col1 array<int>")
result = self.client.execute("select item from %s.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute("select item from %s t, t.col1" % full_name)
assert len(result.data) == 3
assert result.data == ['34', '35', '36']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['3']
# $ parquet-tools schema UnannotatedListOfGroups.parquet
# message UnannotatedListOfGroups {
# repeated group list_of_points {
# required float x;
# required float y;
# }
# }
#
# $ parquet-tools cat UnannotatedListOfGroups.parquet
# list_of_points:
# .x = 1.0
# .y = 1.0
# list_of_points:
# .x = 2.0
# .y = 2.0
def test_unannotated_list_of_groups(self, vector, unique_database):
tablename = "UnannotatedListOfGroups"
full_name = "%s.%s" % (unique_database, tablename)
self._create_test_table(unique_database, tablename,
"UnannotatedListOfGroups.parquet", "col1 array<struct<f1: float, f2: float>>")
result = self.client.execute("select f1, f2 from %s.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1\t1', '2\t2']
result = self.client.execute("select f1, f2 from %s t, t.col1" % full_name)
assert len(result.data) == 2
assert result.data == ['1\t1', '2\t2']
result = self.client.execute(
"select cnt from %s t, (select count(*) cnt from t.col1) v" % full_name)
assert len(result.data) == 1
assert result.data == ['2']
# $ parquet-tools schema AmbiguousList_Modern.parquet
# message org.apache.impala.nested {
# required group ambigArray (LIST) {
# repeated group list {
# required group element {
# required group s2 {
# optional int32 f21;
# optional int32 f22;
# }
# optional int32 F11;
# optional int32 F12;
# }
# }
# }
# }
# $ parquet-tools cat AmbiguousList_Modern.parquet
# ambigArray:
# .list:
# ..element:
# ...s2:
# ....f21 = 21
# ....f22 = 22
# ...F11 = 11
# ...F12 = 12
# .list:
# ..element:
# ...s2:
# ....f21 = 210
# ....f22 = 220
# ...F11 = 110
# ...F12 = 120
#
# $ parquet-tools schema AmbiguousList_Legacy.parquet
# message org.apache.impala.nested {
# required group ambigArray (LIST) {
# repeated group array {
# required group s2 {
# optional int32 f21;
# optional int32 f22;
# }
# optional int32 F11;
# optional int32 F12;
# }
# }
# }
# $ parquet-tools cat AmbiguousList_Legacy.parquet
# ambigArray:
# .array:
# ..s2:
# ...f21 = 21
# ...f22 = 22
# ..F11 = 11
# ..F12 = 12
# .array:
# ..s2:
# ...f21 = 210
# ...f22 = 220
# ..F11 = 110
# ..F12 = 120
def test_ambiguous_list(self, vector, unique_database):
"""IMPALA-4725: Tests the schema-resolution behavior with different values for the
PARQUET_ARRAY_RESOLUTION and PARQUET_FALLBACK_SCHEMA_RESOLUTION query options.
The schema of the Parquet test files is constructed to induce incorrect results
with index-based resolution and the default TWO_LEVEL_THEN_THREE_LEVEL array
resolution policy. Regardless of whether the Parquet data files use the 2-level or
3-level encoding, incorrect results may be returned if the array resolution does
not exactly match the data files'. The name-based policy generally does not have
this problem because it avoids traversing incorrect schema paths.
"""
ambig_modern_tbl = "ambig_modern"
self._create_test_table(unique_database, ambig_modern_tbl,
"AmbiguousList_Modern.parquet",
"ambigarray array<struct<s2:struct<f21:int,f22:int>,f11:int,f12:int>>")
self.run_test_case('QueryTest/parquet-ambiguous-list-modern',
vector, unique_database)
ambig_legacy_tbl = "ambig_legacy"
self._create_test_table(unique_database, ambig_legacy_tbl,
"AmbiguousList_Legacy.parquet",
"ambigarray array<struct<s2:struct<f21:int,f22:int>,f11:int,f12:int>>")
self.run_test_case('QueryTest/parquet-ambiguous-list-legacy',
vector, unique_database)
def _create_test_table(self, dbname, tablename, filename, columns):
"""Creates a table in the given database with the given name and columns. Copies
the file with the given name from TESTFILE_DIR into the table."""
location = get_fs_path("/test-warehouse/%s.db/%s" % (dbname, tablename))
self.client.execute("create table %s.%s (%s) stored as parquet location '%s'" %
(dbname, tablename, columns, location))
local_path = self.TESTFILE_DIR + "/" + filename
check_call(["hadoop", "fs", "-put", local_path, location], shell=False)
@SkipIfOldAggsJoins.nested_types
class TestMaxNestingDepth(ImpalaTestSuite):
# Should be kept in sync with the FE's Type.MAX_NESTING_DEPTH
MAX_NESTING_DEPTH = 100
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestMaxNestingDepth, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'parquet')
def test_max_nesting_depth(self, vector, unique_database):
"""Tests that Impala can scan Parquet files having complex types of
the maximum nesting depth."""
check_call(["hdfs", "dfs", "-copyFromLocal",
"%s/testdata/max_nesting_depth" % os.environ['IMPALA_HOME'],
"%s/%s.db/" % (WAREHOUSE, unique_database)], shell=False)
self.run_test_case('QueryTest/max-nesting-depth', vector, unique_database)
@SkipIfIsilon.hive
@SkipIfS3.hive
@SkipIfLocal.hive
def test_load_hive_table(self, vector, unique_database):
"""Tests that Impala rejects Hive-created tables with complex types that exceed
the maximum nesting depth."""
# Type with a nesting depth of MAX_NESTING_DEPTH + 1
type_sql = ("array<" * self.MAX_NESTING_DEPTH) + "int" +\
(">" * self.MAX_NESTING_DEPTH)
create_table_sql = "CREATE TABLE %s.above_max_depth (f %s) STORED AS PARQUET" %\
(unique_database, type_sql)
self.run_stmt_in_hive(create_table_sql)
self.client.execute("invalidate metadata %s.above_max_depth" % unique_database)
try:
self.client.execute("explain select 1 from %s.above_max_depth" % unique_database)
assert False, "Expected table loading to fail."
except ImpalaBeeswaxException as e:
assert "Type exceeds the maximum nesting depth" in str(e)
|
[] |
[] |
[
"IMPALA_HOME"
] |
[]
|
["IMPALA_HOME"]
|
python
| 1 | 0 | |
suzieq/restServer/sq_rest_server.py
|
#!/usr/bin/env python3
import uvicorn
import argparse
import sys
import os
from suzieq.utils import load_sq_config, get_sq_install_dir
from suzieq.restServer.query import app_init
def get_cert_files(cfg):
sqdir = get_sq_install_dir()
ssl_certfile = cfg.get('rest', {}) \
.get('rest_certfile', f'{sqdir}/config/etc/cert.pem')
ssl_keyfile = cfg.get('rest', {}) \
.get('rest_keyfile', f'{sqdir}/config/etc/key.pem')
if not os.path.isfile(ssl_certfile):
print(f"ERROR: Missing certificate file: {ssl_certfile}")
sys.exit(1)
if not os.path.isfile(ssl_keyfile):
print(f"ERROR: Missing certificate file: {ssl_keyfile}")
sys.exit(1)
return ssl_keyfile, ssl_certfile
def get_log_file(cfg):
tempdir = cfg.get('temp-directory', '/tmp')
if not os.path.exists(tempdir):
os.makedirs(tempdir, exist_ok=True)
return f"{tempdir}/sq-rest-server.log"
def get_log_config(cfg):
log_config = uvicorn.config.LOGGING_CONFIG
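# note: this mutates uvicorn's module-level default LOGGING_CONFIG in place rather than copying it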
log_config['handlers']['access']['class'] = 'logging.handlers.RotatingFileHandler'
log_config['handlers']['access']['maxBytes'] = 10_000_000
log_config['handlers']['access']['backupCount'] = 2
log_config['handlers']['default']['class'] = 'logging.handlers.RotatingFileHandler'
log_config['handlers']['default']['maxBytes'] = 10_000_000
log_config['handlers']['default']['backupCount'] = 2
log_config['handlers']['access']['filename'] = get_log_file(cfg)
del log_config['handlers']['access']['stream']
log_config['handlers']['default']['filename'] = get_log_file(cfg)
del log_config['handlers']['default']['stream']
return log_config
def rest_main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--config",
type=str, help="alternate config file",
default=f'{os.getenv("HOME")}/.suzieq/suzieq-cfg.yml'
)
userargs = parser.parse_args()
app = app_init(userargs.config)
cfg = load_sq_config(config_file=userargs.config)
try:
# presence check only; the server just verifies that an API key is configured
cfg['rest']['API_KEY']
except KeyError:
print('missing API_KEY in config file')
sys.exit(1)
log_level = cfg.get('rest', {}).get('logging-level', 'INFO').lower()
ssl_keyfile, ssl_certfile = get_cert_files(cfg)
srvr_addr = cfg.get('rest', {}).get('address', '127.0.0.1')
srvr_port = cfg.get('rest', {}).get('port', 8000)
uvicorn.run(app, host=srvr_addr, port=srvr_port,
log_level=log_level,
log_config=get_log_config(cfg),
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile)
if __name__ == "__main__":
rest_main()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
stac_fastapi/sqlalchemy/tests/resources/test_item.py
|
import json
import os
import time
import uuid
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from random import randint
from urllib.parse import parse_qs, urlparse, urlsplit
import pystac
from pydantic.datetime_parse import parse_datetime
from pystac.utils import datetime_to_str
from shapely.geometry import Polygon
from stac_fastapi.sqlalchemy.core import CoreCrudClient
from stac_fastapi.types.core import LandingPageMixin
from stac_fastapi.types.rfc3339 import rfc3339_str_to_datetime
def test_create_and_delete_item(app_client, load_test_data):
"""Test creation and deletion of a single item (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{resp.json()['id']}"
)
assert resp.status_code == 200
def test_create_item_conflict(app_client, load_test_data):
"""Test creation of an item which already exists (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
def test_create_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# add test_item to test-collection again, resource already exists
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2, posts successfully
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
def test_delete_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# delete test_item from test-collection
test_item["collection"] = "test-collection"
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
# test-item in test-collection has already been deleted
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 404
# test-item in test-collection-2 still exists, was not deleted
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
def test_update_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# update gsd in test_item, test-collection-2
test_item["properties"]["gsd"] = 16
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 16
# update gsd in test_item, test-collection
test_item["collection"] = "test-collection"
test_item["properties"]["gsd"] = 17
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 17
# test_item in test-collection, updated gsd = 17
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
item = resp.json()
assert item["properties"]["gsd"] == 17
# test_item in test-collection-2, updated gsd = 16
test_item["collection"] = "test-collection-2"
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
item = resp.json()
assert item["properties"]["gsd"] == 16
def test_delete_missing_item(app_client, load_test_data):
"""Test deletion of an item which does not exist (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.delete(f"/collections/{test_item['collection']}/items/hijosh")
assert resp.status_code == 404
def test_create_item_missing_collection(app_client, load_test_data):
"""Test creation of an item without a parent collection (transactions extension)"""
test_item = load_test_data("test_item.json")
test_item["collection"] = "stac is cool"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 422
def test_update_item_already_exists(app_client, load_test_data):
"""Test updating an item which already exists (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
assert test_item["properties"]["gsd"] != 16
test_item["properties"]["gsd"] = 16
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 16
def test_update_new_item(app_client, load_test_data):
"""Test updating an item which does not exist (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 404
def test_update_item_missing_collection(app_client, load_test_data):
"""Test updating an item without a parent collection (transactions extension)"""
test_item = load_test_data("test_item.json")
# Create the item
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Try to update collection of the item
test_item["collection"] = "stac is cool"
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 404
def test_update_item_geometry(app_client, load_test_data):
test_item = load_test_data("test_item.json")
# Create the item
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Update the geometry of the item
test_item["geometry"]["coordinates"] = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Fetch the updated item
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
assert resp.json()["geometry"]["coordinates"] == [
[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
]
def test_get_item(app_client, load_test_data):
"""Test read an item by id (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
get_item = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert get_item.status_code == 200
def test_returns_valid_item(app_client, load_test_data):
"""Test validates fetched item with jsonschema"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
get_item = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert get_item.status_code == 200
item_dict = get_item.json()
# Mock root to allow validation
mock_root = pystac.Catalog(
id="test", description="test desc", href="https://example.com"
)
item = pystac.Item.from_dict(item_dict, preserve_dict=False, root=mock_root)
item.validate()
def test_get_item_collection(app_client, load_test_data):
"""Test read an item collection (core)"""
item_count = randint(1, 4)
test_item = load_test_data("test_item.json")
for idx in range(item_count):
_test_item = deepcopy(test_item)
_test_item["id"] = test_item["id"] + str(idx)
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=_test_item
)
assert resp.status_code == 200
resp = app_client.get(f"/collections/{test_item['collection']}/items")
assert resp.status_code == 200
item_collection = resp.json()
assert item_collection["context"]["matched"] == item_count
def test_pagination(app_client, load_test_data):
"""Test item collection pagination (paging extension)"""
item_count = 10
test_item = load_test_data("test_item.json")
for idx in range(item_count):
_test_item = deepcopy(test_item)
_test_item["id"] = test_item["id"] + str(idx)
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=_test_item
)
assert resp.status_code == 200
resp = app_client.get(
f"/collections/{test_item['collection']}/items", params={"limit": 3}
)
assert resp.status_code == 200
first_page = resp.json()
assert first_page["context"]["returned"] == 3
url_components = urlsplit(first_page["links"][0]["href"])
resp = app_client.get(f"{url_components.path}?{url_components.query}")
assert resp.status_code == 200
second_page = resp.json()
assert second_page["context"]["returned"] == 3
def test_item_timestamps(app_client, load_test_data):
"""Test created and updated timestamps (common metadata)"""
test_item = load_test_data("test_item.json")
start_time = datetime.now(timezone.utc)
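# sleep so the server-assigned "created" timestamp lands strictly after start_time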
time.sleep(2)
# Confirm `created` timestamp
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
item = resp.json()
created_dt = parse_datetime(item["properties"]["created"])
assert resp.status_code == 200
assert start_time < created_dt < datetime.now(timezone.utc)
time.sleep(2)
# Confirm `updated` timestamp
item["properties"]["proj:epsg"] = 4326
resp = app_client.put(f"/collections/{test_item['collection']}/items", json=item)
assert resp.status_code == 200
updated_item = resp.json()
# Created shouldn't change on update
assert item["properties"]["created"] == updated_item["properties"]["created"]
assert parse_datetime(updated_item["properties"]["updated"]) > created_dt
def test_item_search_by_id_post(app_client, load_test_data):
"""Test POST search by item id (core)"""
ids = ["test1", "test2", "test3"]
for id in ids:
test_item = load_test_data("test_item.json")
test_item["id"] = id
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"collections": [test_item["collection"]], "ids": ids}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == len(ids)
assert set([feat["id"] for feat in resp_json["features"]]) == set(ids)
def test_item_search_spatial_query_post(app_client, load_test_data):
"""Test POST search with spatial query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_query_post(app_client, load_test_data):
"""Test POST search with single-tailed spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date = item_date + timedelta(seconds=1)
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
"datetime": f"../{datetime_to_str(item_date)}",
}
resp = app_client.post("/search", json=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_window_post(app_client, load_test_data):
"""Test POST search with two-tailed spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date_before = item_date - timedelta(seconds=1)
item_date_after = item_date + timedelta(seconds=1)
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
"datetime": f"{datetime_to_str(item_date_before)}/{datetime_to_str(item_date_after)}",
}
resp = app_client.post("/search", json=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_open_window(app_client, load_test_data):
"""Test POST search with open spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
for dt in ["/", "../", "/..", "../.."]:
resp = app_client.post("/search", json={"datetime": dt})
assert resp.status_code == 400
def test_item_search_sort_post(app_client, load_test_data):
"""Test POST search with sorting (sort extension)"""
first_item = load_test_data("test_item.json")
item_date = rfc3339_str_to_datetime(first_item["properties"]["datetime"])
resp = app_client.post(
f"/collections/{first_item['collection']}/items", json=first_item
)
assert resp.status_code == 200
second_item = load_test_data("test_item.json")
second_item["id"] = "another-item"
another_item_date = item_date - timedelta(days=1)
second_item["properties"]["datetime"] = datetime_to_str(another_item_date)
resp = app_client.post(
f"/collections/{second_item['collection']}/items", json=second_item
)
assert resp.status_code == 200
params = {
"collections": [first_item["collection"]],
"sortby": [{"field": "datetime", "direction": "desc"}],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == first_item["id"]
assert resp_json["features"][1]["id"] == second_item["id"]
def test_item_search_by_id_get(app_client, load_test_data):
"""Test GET search by item id (core)"""
ids = ["test1", "test2", "test3"]
for id in ids:
test_item = load_test_data("test_item.json")
test_item["id"] = id
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"collections": test_item["collection"], "ids": ",".join(ids)}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == len(ids)
assert set([feat["id"] for feat in resp_json["features"]]) == set(ids)
def test_item_search_bbox_get(app_client, load_test_data):
"""Test GET search with spatial query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"collections": test_item["collection"],
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_get_without_collections(app_client, load_test_data):
"""Test GET search without specifying collections"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_window_get(app_client, load_test_data):
"""Test GET search with spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date_before = item_date - timedelta(seconds=1)
item_date_after = item_date + timedelta(seconds=1)
params = {
"collections": test_item["collection"],
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
"datetime": f"{datetime_to_str(item_date_before)}/{datetime_to_str(item_date_after)}",
}
resp = app_client.get("/search", params=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_sort_get(app_client, load_test_data):
"""Test GET search with sorting (sort extension)"""
first_item = load_test_data("test_item.json")
item_date = rfc3339_str_to_datetime(first_item["properties"]["datetime"])
resp = app_client.post(
f"/collections/{first_item['collection']}/items", json=first_item
)
assert resp.status_code == 200
second_item = load_test_data("test_item.json")
second_item["id"] = "another-item"
another_item_date = item_date - timedelta(days=1)
second_item["properties"]["datetime"] = datetime_to_str(another_item_date)
resp = app_client.post(
f"/collections/{second_item['collection']}/items", json=second_item
)
assert resp.status_code == 200
params = {"collections": [first_item["collection"]], "sortby": "-datetime"}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == first_item["id"]
assert resp_json["features"][1]["id"] == second_item["id"]
def test_item_search_post_without_collection(app_client, load_test_data):
"""Test POST search without specifying a collection"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"bbox": test_item["bbox"],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_properties_jsonb(app_client, load_test_data):
"""Test POST search with JSONB query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# EPSG is a JSONB key
params = {"query": {"proj:epsg": {"gt": test_item["properties"]["proj:epsg"] + 1}}}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 0
def test_item_search_properties_field(app_client, load_test_data):
"""Test POST search indexed field with query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Orientation is an indexed field
params = {"query": {"orientation": {"eq": "south"}}}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 0
def test_item_search_get_query_extension(app_client, load_test_data):
"""Test GET search with JSONB query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# EPSG is a JSONB key
params = {
"collections": [test_item["collection"]],
"query": json.dumps(
{"proj:epsg": {"gt": test_item["properties"]["proj:epsg"] + 1}}
),
}
resp = app_client.get("/search", params=params)
assert resp.json()["context"]["returned"] == 0
params["query"] = json.dumps(
{"proj:epsg": {"eq": test_item["properties"]["proj:epsg"]}}
)
resp = app_client.get("/search", params=params)
resp_json = resp.json()
assert resp_json["context"]["returned"] == 1
assert (
resp_json["features"][0]["properties"]["proj:epsg"]
== test_item["properties"]["proj:epsg"]
)
def test_get_missing_item_collection(app_client):
"""Test reading a collection which does not exist"""
resp = app_client.get("/collections/invalid-collection/items")
assert resp.status_code == 200
def test_pagination_item_collection(app_client, load_test_data):
"""Test item collection pagination links (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
# Paginate through all 5 items with a limit of 1 (expecting 5 requests)
page = app_client.get(
f"/collections/{test_item['collection']}/items", params={"limit": 1}
)
idx = 0
item_ids = []
while True:
idx += 1
page_data = page.json()
item_ids.append(page_data["features"][0]["id"])
next_link = list(filter(lambda link: link["rel"] == "next", page_data["links"]))
if not next_link:
break
query_params = parse_qs(urlparse(next_link[0]["href"]).query)
page = app_client.get(
f"/collections/{test_item['collection']}/items",
params=query_params,
)
# Our limit is 1 so we expect len(ids) number of requests before we run out of pages
assert idx == len(ids)
# Confirm we have paginated through all items
assert not set(item_ids) - set(ids)
def test_pagination_post(app_client, load_test_data):
"""Test POST pagination (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
# Paginate through all 5 items with a limit of 1 (expecting 5 requests)
request_body = {"ids": ids, "limit": 1}
page = app_client.post("/search", json=request_body)
idx = 0
item_ids = []
while True:
idx += 1
page_data = page.json()
item_ids.append(page_data["features"][0]["id"])
next_link = list(filter(lambda link: link["rel"] == "next", page_data["links"]))
if not next_link:
break
# Merge request bodies
request_body.update(next_link[0]["body"])
page = app_client.post("/search", json=request_body)
# Our limit is 1 so we expect len(ids) number of requests before we run out of pages
assert idx == len(ids)
# Confirm we have paginated through all items
assert not set(item_ids) - set(ids)
def test_pagination_token_idempotent(app_client, load_test_data):
"""Test that pagination tokens are idempotent (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
page = app_client.get("/search", params={"ids": ",".join(ids), "limit": 3})
page_data = page.json()
next_link = list(filter(lambda link: link["rel"] == "next", page_data["links"]))
# Confirm token is idempotent
resp1 = app_client.get(
"/search", params=parse_qs(urlparse(next_link[0]["href"]).query)
)
resp2 = app_client.get(
"/search", params=parse_qs(urlparse(next_link[0]["href"]).query)
)
resp1_data = resp1.json()
resp2_data = resp2.json()
# Two different requests with the same pagination token should return the same items
assert [item["id"] for item in resp1_data["features"]] == [
item["id"] for item in resp2_data["features"]
]
def test_field_extension_get(app_client, load_test_data):
"""Test GET search with included fields (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"fields": "+properties.proj:epsg,+properties.gsd"}
resp = app_client.get("/search", params=params)
feat_properties = resp.json()["features"][0]["properties"]
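# "datetime" is returned even though it was not requested; it belongs to the
# default include set of the fields extension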
assert not set(feat_properties) - {"proj:epsg", "gsd", "datetime"}
def test_field_extension_post(app_client, load_test_data):
"""Test POST search with included and excluded fields (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {
"fields": {
"exclude": ["assets.B1"],
"include": ["properties.eo:cloud_cover", "properties.orientation"],
}
}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "B1" not in resp_json["features"][0]["assets"].keys()
assert not set(resp_json["features"][0]["properties"]) - {
"orientation",
"eo:cloud_cover",
"datetime",
}
def test_field_extension_exclude_and_include(app_client, load_test_data):
"""Test POST search including/excluding same field (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {
"fields": {
"exclude": ["properties.eo:cloud_cover"],
"include": ["properties.eo:cloud_cover"],
}
}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "eo:cloud_cover" not in resp_json["features"][0]["properties"]
def test_field_extension_exclude_default_includes(app_client, load_test_data):
"""Test POST search excluding a forbidden field (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {"fields": {"exclude": ["geometry"]}}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "geometry" not in resp_json["features"][0]
def test_search_intersects_and_bbox(app_client):
"""Test POST search intersects and bbox are mutually exclusive (core)"""
bbox = [-118, 34, -117, 35]
geoj = Polygon.from_bounds(*bbox).__geo_interface__
params = {"bbox": bbox, "intersects": geoj}
resp = app_client.post("/search", json=params)
assert resp.status_code == 400
def test_get_missing_item(app_client, load_test_data):
"""Test read item which does not exist (transactions extension)"""
test_coll = load_test_data("test_collection.json")
resp = app_client.get(f"/collections/{test_coll['id']}/items/invalid-item")
assert resp.status_code == 404
def test_search_invalid_query_field(app_client):
body = {"query": {"gsd": {"lt": 100}, "invalid-field": {"eq": 50}}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
def test_search_bbox_errors(app_client):
body = {"query": {"bbox": [0]}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
body = {"query": {"bbox": [100.0, 0.0, 0.0, 105.0, 1.0, 1.0]}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
params = {"bbox": "100.0,0.0,0.0,105.0"}
resp = app_client.get("/search", params=params)
assert resp.status_code == 400
def test_conformance_classes_configurable():
"""Test conformance class configurability"""
landing = LandingPageMixin()
landing_page = landing._landing_page(
base_url="http://test/test",
conformance_classes=["this is a test"],
extension_schemas=[],
)
assert landing_page["conformsTo"][0] == "this is a test"
# Update environment to avoid key error on client instantiation
os.environ["READER_CONN_STRING"] = "testing"
os.environ["WRITER_CONN_STRING"] = "testing"
client = CoreCrudClient(base_conformance_classes=["this is a test"])
assert client.conformance_classes()[0] == "this is a test"
def test_search_datetime_validation_errors(app_client):
bad_datetimes = [
"37-01-01T12:00:27.87Z",
"1985-13-12T23:20:50.52Z",
"1985-12-32T23:20:50.52Z",
"1985-12-01T25:20:50.52Z",
"1985-12-01T00:60:50.52Z",
"1985-12-01T00:06:61.52Z",
"1990-12-31T23:59:61Z",
"1986-04-12T23:20:50.52Z/1985-04-12T23:20:50.52Z",
]
for dt in bad_datetimes:
body = {"query": {"datetime": dt}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
resp = app_client.get("/search?datetime={}".format(dt))
assert resp.status_code == 400
|
[] |
[] |
[
"READER_CONN_STRING",
"WRITER_CONN_STRING"
] |
[]
|
["READER_CONN_STRING", "WRITER_CONN_STRING"]
|
python
| 2 | 0 | |
ib_insync/ibcontroller.py
|
"""Programmatic control over the TWS/gateway client software."""
import asyncio
import configparser
import logging
import os
from contextlib import suppress
from dataclasses import dataclass
from typing import ClassVar, Union
from eventkit import Event
import ib_insync.util as util
from ib_insync.contract import Forex
from ib_insync.ib import IB
__all__ = ['IBC', 'IBController', 'Watchdog']
@dataclass
class IBC:
r"""
Programmatic control over starting and stopping TWS/Gateway
using IBC (https://github.com/IbcAlpha/IBC).
Args:
twsVersion (int): (required) The major version number for
TWS or gateway.
gateway (bool):
* True = gateway
* False = TWS
tradingMode (str): 'live' or 'paper'.
userid (str): IB account username. It is recommended to set the real
username/password in a secured IBC config file.
password (str): IB account password.
twsPath (str): Path to the TWS installation folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Applications
* Windows: C:\\Jts
twsSettingsPath (str): Path to the TWS settings folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Jts
* Windows: Not available
ibcPath (str): Path to the IBC installation folder.
Defaults:
* Linux: /opt/ibc
* OS X: /opt/ibc
* Windows: C:\\IBC
ibcIni (str): Path to the IBC configuration file.
Defaults:
* Linux: ~/ibc/config.ini
* OS X: ~/ibc/config.ini
* Windows: %%HOMEPATH%%\\Documents\\IBC\\config.ini
javaPath (str): Path to Java executable.
Default is to use the Java VM included with TWS/gateway.
fixuserid (str): FIX account user id (gateway only).
fixpassword (str): FIX account password (gateway only).
This is not intended to be run in a notebook.
To use IBC on Windows, the proactor (or quamash) event loop
must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
Example usage:
.. code-block:: python
ibc = IBC(976, gateway=True, tradingMode='live',
userid='edemo', password='demouser')
ibc.start()
IB.run()
"""
IbcLogLevel: ClassVar = logging.DEBUG
twsVersion: int = 0
gateway: bool = False
tradingMode: str = ''
twsPath: str = ''
twsSettingsPath: str = ''
ibcPath: str = ''
ibcIni: str = ''
javaPath: str = ''
userid: str = ''
password: str = ''
fixuserid: str = ''
fixpassword: str = ''
def __post_init__(self):
self._isWindows = os.sys.platform == 'win32'
if not self.ibcPath:
self.ibcPath = '/opt/ibc' if not self._isWindows else 'C:\\IBC'
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBC')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# map from field names to cmd arguments; key=(UnixArg, WindowsArg)
args = dict(
twsVersion=('', ''),
gateway=('--gateway', '/Gateway'),
tradingMode=('--mode=', '/Mode:'),
twsPath=('--tws-path=', '/TwsPath:'),
twsSettingsPath=('--tws-settings-path=', ''),
ibcPath=('--ibc-path=', '/IbcPath:'),
ibcIni=('--ibc-ini=', '/Config:'),
javaPath=('--java-path=', '/JavaPath:'),
userid=('--user=', '/User:'),
password=('--pw=', '/PW:'),
fixuserid=('--fix-user=', '/FIXUser:'),
fixpassword=('--fix-pw=', '/FIXPW:'))
# create shell command
cmd = [
f'{self.ibcPath}\\scripts\\StartIBC.bat' if self._isWindows else
f'{self.ibcPath}/scripts/ibcstart.sh']
for k, v in util.dataclassAsDict(self).items():
arg = args[k][self._isWindows]
if v:
if arg.endswith('=') or arg.endswith(':'):
cmd.append(f'{arg}{v}')
elif arg:
cmd.append(arg)
else:
cmd.append(str(v))
# run shell command
self._proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
if self._monitor:
self._monitor.cancel()
self._monitor = None
if self._isWindows:
import subprocess
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(self._proc.pid)])
else:
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.log(IBC.IbcLogLevel, line.strip().decode())
@dataclass
class IBController:
"""
Programmatic control over starting and stopping TWS/Gateway
using IBController (https://github.com/ib-controller/ib-controller).
For new installations it is recommended to use IBC instead.
On Windows the proactor (or quamash) event loop must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
This is not intended to be run in a notebook.
"""
APP: str = 'TWS' # 'TWS' or 'GATEWAY'
TWS_MAJOR_VRSN: str = '969'
TRADING_MODE: str = 'live' # 'live' or 'paper'
IBC_INI: str = '~/IBController/IBController.ini'
IBC_PATH: str = '~/IBController'
TWS_PATH: str = '~/Jts'
LOG_PATH: str = '~/IBController/Logs'
TWSUSERID: str = ''
TWSPASSWORD: str = ''
JAVA_PATH: str = ''
TWS_CONFIG_PATH: str = ''
def __post_init__(self):
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBController')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def stop(self):
"""Cleanly shutdown TWS/IBG."""
util.run(self.stopAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# expand paths
d = util.dataclassAsDict(self)
for k, v in d.items():
if k.endswith('_PATH') or k.endswith('_INI'):
d[k] = os.path.expanduser(v)
if not d['TWS_CONFIG_PATH']:
d['TWS_CONFIG_PATH'] = d['TWS_PATH']
self.__dict__.update(**d)
# run shell command
ext = 'bat' if os.sys.platform == 'win32' else 'sh'
cmd = f'{d["IBC_PATH"]}/Scripts/DisplayBannerAndLaunch.{ext}'
env = {**os.environ, **d}
self._proc = await asyncio.create_subprocess_exec(
cmd, env=env, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def stopAsync(self):
if not self._proc:
return
self._logger.info('Stopping')
# read ibcontroller ini file to get controller port
txt = '[section]' + open(self.IBC_INI).read()
config = configparser.ConfigParser()
config.read_string(txt)
contrPort = config.getint('section', 'IbControllerPort')
_reader, writer = await asyncio.open_connection('127.0.0.1', contrPort)
writer.write(b'STOP')
await writer.drain()
writer.close()
await self._proc.wait()
self._proc = None
self._monitor.cancel()
self._monitor = None
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
self._monitor.cancel()
self._monitor = None
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.info(line.strip().decode())
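# A minimal usage sketch for IBController (not part of the original module);
# the APP/version/mode values below are hypothetical:
def _example_ibcontroller():
    controller = IBController(
        APP='GATEWAY', TWS_MAJOR_VRSN='972', TRADING_MODE='paper')
    controller.start()
    IB.run()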
@dataclass
class Watchdog:
"""
Start, connect and watch over the TWS or gateway app and try to keep it
up and running. It is intended to be used in an event-driven
application that properly initializes itself upon (re-)connect.
It is not intended to be used in a notebook or in imperative-style code.
Do not expect Watchdog to magically shield you from reality. Do not use
Watchdog unless you understand what it does and doesn't do.
Args:
controller (Union[IBC, IBController]): (required) IBC or IBController
instance.
ib (IB): (required) IB instance to be used. Do not connect this
instance as Watchdog takes care of that.
host (str): Used for connecting IB instance.
port (int): Used for connecting IB instance.
clientId (int): Used for connecting IB instance.
connectTimeout (float): Used for connecting IB instance.
appStartupTime (float): Time (in seconds) that the app is given
to start up. Make sure that it is given ample time.
appTimeout (float): Timeout (in seconds) for network traffic idle time.
retryDelay (float): Time (in seconds) to restart app after a
previous failure.
The idea is to wait until there is no traffic coming from the app for
a certain amount of time (the ``appTimeout`` parameter). This triggers
a historical request to be placed just to see if the app is still alive
and well. If yes, then continue, if no then restart the whole app
and reconnect. Restarting will also occur directly on errors 1100 and 100.
Example usage:
.. code-block:: python
def onConnected():
print(ib.accountValues())
ibc = IBC(974, gateway=True, tradingMode='paper')
ib = IB()
ib.connectedEvent += onConnected
watchdog = Watchdog(ibc, ib, port=4002)
watchdog.start()
ib.run()
Events:
* ``startingEvent`` (watchdog: :class:`.Watchdog`)
* ``startedEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppingEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppedEvent`` (watchdog: :class:`.Watchdog`)
* ``softTimeoutEvent`` (watchdog: :class:`.Watchdog`)
* ``hardTimeoutEvent`` (watchdog: :class:`.Watchdog`)
"""
events = [
'startingEvent', 'startedEvent', 'stoppingEvent', 'stoppedEvent',
'softTimeoutEvent', 'hardTimeoutEvent']
controller: Union[IBC, IBController]
ib: IB
host: str = '127.0.0.1'
port: int = 7497
clientId: int = 1
connectTimeout: float = 2
appStartupTime: float = 30
appTimeout: float = 20
retryDelay: float = 2
def __post_init__(self):
self.startingEvent = Event('startingEvent')
self.startedEvent = Event('startedEvent')
self.stoppingEvent = Event('stoppingEvent')
self.stoppedEvent = Event('stoppedEvent')
self.softTimeoutEvent = Event('softTimeoutEvent')
self.hardTimeoutEvent = Event('hardTimeoutEvent')
if not self.controller:
raise ValueError('No controller supplied')
if not self.ib:
raise ValueError('No IB instance supplied')
if self.ib.isConnected():
raise ValueError('IB instance must not be connected')
assert 0 < self.appTimeout < 60
assert self.retryDelay > 0
self._runner = None
self._logger = logging.getLogger('ib_insync.Watchdog')
def start(self):
self._logger.info('Starting')
self.startingEvent.emit(self)
self._runner = asyncio.ensure_future(self.runAsync())
def stop(self):
self._logger.info('Stopping')
self.stoppingEvent.emit(self)
self.ib.disconnect()
self._runner = None
async def runAsync(self):
def onTimeout(idlePeriod):
if not waiter.done():
waiter.set_result(None)
def onError(reqId, errorCode, errorString, contract):
if errorCode in [1100, 100] and not waiter.done():
waiter.set_exception(Warning(f'Error {errorCode}'))
def onDisconnected():
if not waiter.done():
waiter.set_exception(Warning('Disconnected'))
while self._runner:
try:
await self.controller.startAsync()
await asyncio.sleep(self.appStartupTime)
await self.ib.connectAsync(
self.host, self.port, self.clientId, self.connectTimeout)
self.startedEvent.emit(self)
self.ib.setTimeout(self.appTimeout)
self.ib.timeoutEvent += onTimeout
self.ib.errorEvent += onError
self.ib.disconnectedEvent += onDisconnected
while self._runner:
waiter = asyncio.Future()
await waiter
# soft timeout, probe the app with a historical request
self._logger.debug('Soft timeout')
self.softTimeoutEvent.emit(self)
probe = self.ib.reqHistoricalDataAsync(
Forex('EURUSD'), '', '30 S', '5 secs',
'MIDPOINT', False)
bars = None
with suppress(asyncio.TimeoutError):
bars = await asyncio.wait_for(probe, 4)
if not bars:
self.hardTimeoutEvent.emit(self)
raise Warning('Hard timeout')
self.ib.setTimeout(self.appTimeout)
except ConnectionRefusedError:
pass
except Warning as w:
self._logger.warning(w)
except Exception as e:
self._logger.exception(e)
finally:
self.ib.timeoutEvent -= onTimeout
self.ib.errorEvent -= onError
self.ib.disconnectedEvent -= onDisconnected
await self.controller.terminateAsync()
self.stoppedEvent.emit(self)
if self._runner:
await asyncio.sleep(self.retryDelay)
if __name__ == '__main__':
asyncio.get_event_loop().set_debug(True)
util.logToConsole(logging.DEBUG)
ibc = IBC(976, gateway=True, tradingMode='paper')
# userid='edemo', password='demouser')
ib = IB()
app = Watchdog(ibc, ib, port=4002, appStartupTime=15, appTimeout=10)
app.start()
IB.run()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"github.com/bwmarrin/discordgo"
"github.com/gohonduras/discord-bot/hackernews"
"github.com/sirupsen/logrus"
prefixed "github.com/x-cray/logrus-prefixed-formatter"
)
var (
log = logrus.WithField("prefix", "main")
)
// Message handler implements several functions which will
// be in charge of responding to discord messages the bot
// observes.
type messageHandler struct {
ctx context.Context
cancel context.CancelFunc
}
// The init function runs on package initialization, helping us setup
// some useful globals such as a logging formatter.
func init() {
formatter := new(prefixed.TextFormatter)
	formatter.TimestampFormat = "2006-01-02 15:04:05" // Go reference-time layout
formatter.FullTimestamp = true
logrus.SetFormatter(formatter)
}
func main() {
token := os.Getenv("DISCORD_TOKEN")
if token == "" {
log.Fatalf("Expected DISCORD_TOKEN env var, provided none")
}
// Create a new Discord session using the provided bot token.
dg, err := discordgo.New(fmt.Sprintf("Bot %s", token))
if err != nil {
log.Fatalf("Could not initialize discord session: %v", err)
}
// Open a websocket connection to Discord and begin listening.
err = dg.Open()
if err != nil {
log.Fatalf("Error opening connection: %v", err)
}
// We initialize a new context with a cancelation function, useful
// for cleanup of every possible goroutine on SIGTERM.
ctx, cancel := context.WithCancel(context.Background())
handler := &messageHandler{
ctx: ctx,
cancel: cancel,
}
// Go hacker news handler.
dg.AddHandler(handler.hackerNewsHandler)
// Wait here until SIGTERM or another interruption signal is received.
log.Println("Bot is now running, press ctrl-c to exit")
sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
<-sc
// Cleanly close down the Discord session and cancel the global
// context gracefully.
cancel()
if err := dg.Close(); err != nil {
log.Fatalf("Could not gracefully stop discord session: %v", err)
}
}
// This function will be called (due to AddHandler above) every time a new
// message is created on any channel that the authenticated bot has access to.
func (mh *messageHandler) hackerNewsHandler(s *discordgo.Session, m *discordgo.MessageCreate) {
// Ignore all messages created by the bot itself.
if m.Author.ID == s.State.User.ID {
return
}
commandPrefix := "!hackernews"
	if !strings.HasPrefix(m.Content, commandPrefix) {
return
}
searchQuery := strings.TrimSpace(m.Content[len(commandPrefix):])
if searchQuery == "" {
return
}
hnClient := hackernews.NewAPIClient()
res, err := hnClient.Search(mh.ctx, searchQuery)
if err != nil {
log.Errorf("Could not search hacker news API: %v", err)
return
}
if _, err := s.ChannelMessageSend(m.ChannelID, res.String()); err != nil {
log.Errorf("Could not send message over channel: %v", err)
}
}
|
[
"\"DISCORD_TOKEN\""
] |
[] |
[
"DISCORD_TOKEN"
] |
[]
|
["DISCORD_TOKEN"]
|
go
| 1 | 0 | |
nustar_gen/wrappers.py
|
import os, stat
import warnings
from nustar_gen.utils import energy_to_chan, validate_det1_region
from astropy import units as u
def make_spectra(infile, mod, src_reg,
mode='01', bgd_reg='None', outpath='None', runmkarf='yes', extended='no'):
'''
Generate a script to run nuproducts to extract a source (and optionally
a background) spectrum along with their response files.
Always runs numkrmf and numkarf for now.
Parameters
----------
infile: str
Full path to the input event file.
mod: str
'A' or 'B'
src_reg: str
Full path to source region.
Other Parameters
-------------------
bgd_reg: str
If not 'None', then must be the full path to the background region file
outpath: str
Optional. Default is to put the lightcurves in the same location as infile
mode: str
Optional. Used primarily if you're doing mode06 analysis and need to specify
output names that are more complicated.
'''
from astropy.io.fits import getheader
# Make sure environment is set up properly
_check_environment()
# Check to see that all files exist:
assert os.path.isfile(infile), 'make_spectra: infile does not exist!'
assert os.path.isfile(src_reg), 'make_spectra: src_reg does not exist!'
if bgd_reg != 'None':
assert os.path.isfile(bgd_reg), 'make_spectra: bgd_reg does not exist!'
bkgextract='yes'
else:
bkgextract='no'
reg_base = os.path.basename(src_reg)
reg_base = os.path.splitext(reg_base)[0]
evdir = os.path.dirname(infile)
seqid = os.path.basename(os.path.dirname(evdir))
if outpath == 'None':
outdir = evdir
else:
outdir = outpath
try:
os.makedirs(outpath)
except FileExistsError:
# directory already exists
pass
stemout = f'nu{seqid}{mod}{mode}_{reg_base}'
lc_script = outdir+f'/runspec_{stemout}.sh'
with open(lc_script, 'w') as f:
f.write('nuproducts imagefile=NONE lcfile=NONE bkglcfile=NONE ')
f.write(f'runmkarf={runmkarf} extended={extended} runmkrmf=yes ')
f.write(f'indir={evdir} outdir={outdir} instrument=FPM{mod} ')
f.write(f'steminputs=nu{seqid} stemout={stemout} ')
f.write(f'srcregionfile={src_reg} ')
if bkgextract == 'no':
f.write(f'bkgextract=no ')
else:
f.write(f'bkgextract=yes bkgregionfile={bgd_reg} ')
f.write('clobber=yes')
os.chmod(lc_script, stat.S_IRWXG+stat.S_IRWXU)
return lc_script
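# A minimal usage sketch (hypothetical paths; not part of the original module).
# make_spectra() only writes the nuproducts shell script, so the caller still
# has to execute it:
def _example_make_spectra():
    evt = '/data/nustar/30001002001/event_cl/nu30001002001A01_cl.evt'  # hypothetical
    src = '/data/nustar/30001002001/event_cl/src.reg'                  # hypothetical
    script = make_spectra(evt, 'A', src)
    os.system(script)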
def make_lightcurve(infile, mod, src_reg,
barycorr=False, time_bin=100*u.s, mode='01',
bgd_reg='None', outpath='None', elow=3, ehigh=20):
'''
Generate a script to run nuproducts
Parameters
----------
infile: str
Full path to the input event file.
mod: str
'A' or 'B'
src_reg: str
Full path to source region.
Other Parameters
-------------------
bgd_reg: str
If not 'None', then must be the full path to the background region file
barycorr: bool
Default is 'False'. If 'True', then queries the infile for the OBJ J2000
coordinates and uses these for the barycenter correction.
elow: float
Low-energy bound. Default is 3 keV.
ehigh: float
High-energy bound. Default is 20 keV.
outpath: str
Optional. Default is to put the lightcurves in the same location as infile
mode: str
Optional. Used primarily if you're doing mode06 analysis and need to specify
output names that are more complicated.
'''
from astropy.io.fits import getheader
# Make sure environment is set up properly
_check_environment()
# Check to see that all files exist:
assert os.path.isfile(infile), 'make_lightcurve: infile does not exist!'
assert os.path.isfile(src_reg), 'make_lightcurve: src_reg does not exist!'
if bgd_reg != 'None':
assert os.path.isfile(bgd_reg), 'make_lightcurve: bgd_reg does not exist!'
bkgextract='yes'
else:
bkgextract='no'
reg_base = os.path.basename(src_reg)
reg_base = os.path.splitext(reg_base)[0]
evdir = os.path.dirname(infile)
seqid = os.path.basename(os.path.dirname(evdir))
if outpath == 'None':
outdir = evdir
else:
outdir = outpath
try:
os.makedirs(outdir)
except FileExistsError:
# directory already exists
pass
time_bin = (time_bin.to(u.s)).value
stemout = f'nu{seqid}{mod}{mode}_{reg_base}_{elow}to{ehigh}_{time_bin:3.4}s'
lc_script = outdir+f'/runlc_{stemout}.sh'
pi_low = energy_to_chan(elow)
pi_high = energy_to_chan(ehigh)
with open(lc_script, 'w') as f:
f.write('nuproducts phafile=NONE bkgphafile=NONE imagefile=NONE ')
f.write(f'runmkarf=no runmkrmf=no pilow={pi_low} pihigh={pi_high} ')
f.write(f'indir={evdir} outdir={outdir} instrument=FPM{mod} ')
f.write(f'steminputs=nu{seqid} stemout={stemout} ')
f.write(f'srcregionfile={src_reg} ')
if bkgextract == 'no':
f.write(f'bkgextract=no ')
else:
f.write(f'bkgextract=yes bkgregionfile={bgd_reg} ')
f.write(f'binsize={time_bin} ')
if barycorr:
attorb=evdir+f'/nu{seqid}{mod}.attorb'
hdr = getheader(infile)
ra = hdr['RA_OBJ']
dec = hdr['DEC_OBJ']
f.write(f'barycorr=yes srcra_barycorr={ra} srcdec_barycorr={dec} ')
f.write(f'orbitfile={attorb} ')
f.write('clobber=yes')
os.chmod(lc_script, stat.S_IRWXG+stat.S_IRWXU)
return lc_script
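# A minimal sketch (hypothetical paths): time_bin must be an astropy Quantity,
# and the barycorr branch reads RA_OBJ/DEC_OBJ from the event-file header:
def _example_make_lightcurve():
    evt = '/data/nustar/30001002001/event_cl/nu30001002001A01_cl.evt'  # hypothetical
    src = '/data/nustar/30001002001/event_cl/src.reg'                  # hypothetical
    script = make_lightcurve(evt, 'A', src, time_bin=60 * u.s,
                             elow=3, ehigh=10, barycorr=True)
    os.system(script)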
def make_exposure_map(obs, mod, vign_energy = False,
det_expo=False, evf=False):
'''
Create a script to run nuexpomap. Returns the script name.
Parameters
----------
obs: nustar_gen.info.Observation(), required
A valid Observation object containing the observation metadata.
mod: str
'A' or 'B'
Other Parameters
----------------
vign_energy: float, optional
Energy where you want to apply the vignetting. Default is no vignetting.
det_expo : boolean, optional, default=False
Whether or not to retain the DET1 exposure map file
'''
import glob
# Make sure environment is set up properly
_check_environment()
# Locate the mast file, attfile, which are what you need for inputs.
evdir = obs.evdir
# Find the mast file. glob is necessary to handle .gz or .fits extensions:
mastaspectfile = glob.glob(evdir+'/nu'+obs.seqid+'*mast*')[0]
# Find the attitude file:
attfile = glob.glob(evdir+'/nu'+obs.seqid+'*att.fits')[0]
# Find the det1reffile:
det1reffile = glob.glob(evdir+'/nu'+obs.seqid+mod+'_det1.fits')[0]
# Only do this for A01, since that's all that matters
# Override this with evfile keyword:
if evf is False:
evfile = obs.science_files[mod][0]
assert '01' in evfile, f'make_exposure_map: Not an 01 event file: {evfile}'
else:
evfile=evf
# Construct the nuexpomap call:
print(obs.seqid, mod)
expo_script = obs.out_path+'/runexpo_'+obs.seqid+mod+'.sh'
expo = open(expo_script, 'w')
cmd_string = 'nuexpomap '
cmd_string += f'infile={evfile} '
if vign_energy is not False:
cmd_string+=f'vignflag=yes energy={vign_energy} '
else:
cmd_string += 'vignflag=no '
cmd_string += f'mastaspectfile={mastaspectfile} '
cmd_string += f'attfile={attfile} '
cmd_string += f'det1reffile={det1reffile} '
sky_expo_file = obs.out_path+'/nu'+obs.seqid+mod+'_sky_expo.fits'
cmd_string += f'expomapfile={sky_expo_file} '
if det_expo:
det_expo_file = obs.out_path+'/nu'+obs.seqid+mod+'_det1_expo.fits'
cmd_string += f'det1instrfile={det_expo_file} '
cmd_string += 'clobber=yes '
expo.write(cmd_string)
expo.close()
os.chmod(expo_script, stat.S_IRWXG+stat.S_IRWXU)
return expo_script
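# A minimal sketch: obs is assumed to be an existing nustar_gen.info.Observation;
# the returned script still has to be executed to actually run nuexpomap:
def _example_make_exposure_map(obs):
    script = make_exposure_map(obs, 'A', det_expo=True)
    os.system(script)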
def make_image(infile, elow = 3, ehigh = 20, clobber=True, outpath=False, usrgti=False):
'''
Spawn an xselect instance that produces the image in the energy range.
Parameters
----------
infile: str
Full path to the file that you want to process
elow: float
Low-energy band for the image
ehigh: float
High-energy band for the image
Other Parameters
----------------
clobber: boolean, optional, default=True
Overwrite existing files?
outpath: str, optional, default=os.path.dirname(infile)
Set the destination for output. Defaults to same location as infile.
usrgti : str, optional, default = False
Use a GTI file to time-filter the data (see nustar_gen.utils.make_usr_gti)
If False, do nothing.
Return
-------
outfile: str
The full path to the output image.
'''
# Make sure environment is set up properly
_check_environment()
# Check if input file exists:
try:
with open(infile) as f:
pass
except IOError:
raise IOError("make_image: File does not exist %s" % (infile))
if not outpath:
outdir=os.path.dirname(infile)
else:
outdir=outpath
try:
os.makedirs(outdir)
except FileExistsError:
# directory already exists
pass
# Trim the filename:
sname=os.path.basename(infile)
if sname.endswith('.gz'):
sname = os.path.splitext(sname)[0]
sname = os.path.splitext(sname)[0]
if usrgti is not False:
rshort = os.path.basename(usrgti)
rname = os.path.splitext(rshort)[0]
sname += f'_{rname}'
# Generate outfile name
outfile = outdir + '/' + sname + f'_{elow}to{ehigh}keV.fits'
if os.path.exists(outfile):
if not clobber:
warnings.warn('make_image: %s exists, use clobber=True to regenerate' % (outfile))
else:
os.system("rm "+outfile)
xsel_file = _make_xselect_commands(infile, outfile, elow, ehigh, usrgti=usrgti)
os.system("xselect @"+xsel_file)
os.system("rm -r -f "+xsel_file)
return outfile
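# A minimal sketch (hypothetical path): unlike the script generators above,
# make_image() drives xselect itself and returns the finished image:
def _example_make_image():
    evt = '/data/nustar/30001002001/event_cl/nu30001002001A01_cl.evt'  # hypothetical
    img = make_image(evt, elow=3, ehigh=10)
    print(img)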
def extract_sky_events(infile, regfile, clobber=True, outpath=False):
'''
Spawn an xselect instance that produces a new event file screened using a sky ds9
region file.
Parameters
----------
infile: str
Full path to the event file that you want to process
regfile: str
Full path to a ds9 region file (in sky coordinates) to be used to filter
the events.
Other Parameters
----------------
clobber: boolean, optional, default=True
Overwrite existing files?
outpath: str, optional, default=os.path.dirname(infile)
Set the destination for output. Defaults to same location as infile.
Return
-------
outfile: str
The full path to the output image.
'''
# Make sure environment is set up properly
_check_environment()
# Check if input file exists:
try:
with open(infile) as f:
pass
except IOError:
raise IOError("extract_det1_events: File does not exist %s" % (infile))
try:
with open(regfile) as f:
pass
except IOError:
raise IOError("extract_det1_events: File does not exist %s" % (regfile))
if not outpath:
outdir=os.path.dirname(infile)
else:
outdir=outpath
# Trim the filename:
sname=os.path.basename(infile)
if sname.endswith('.gz'):
sname = os.path.splitext(sname)[0]
sname = os.path.splitext(sname)[0]
rshort = os.path.basename(regfile)
rname = os.path.splitext(rshort)[0]
# Generate outfile name
outfile = outdir + '/'+sname+f'_{rname}.evt'
if os.path.exists(outfile) and not clobber:
warnings.warn('extract_sky_events: %s exists, use clobber=True to regenerate' % (outfile))
else:
os.system("rm "+outfile)
xsel_file = _make_xselect_commands_sky_evts(infile, outfile, regfile)
os.system("xselect @"+xsel_file)
os.system("rm -r -f "+xsel_file)
return outfile
def barycenter_events(obs, infile, mod='A'):
'''
Run barycorr on an event file.
Parameters
--------------------
obs: nustar_gen.info.Observation
An instance of the Observation class
infile: str
Full path to input file
mod: str
Module to use. 'A' or 'B'
Other Parameters
-------------------
TO BE IMPLEMENTED
clockfile: str
Path to the clockfile you want to use. Default is to use the CALDB clockfile
'''
# Locate the attorb file:
evdir = obs.evdir
attorb = f'{obs.evdir}nu{obs.seqid}{mod}.attorb'
# Trim the filename:
if obs.out_path is False:
outdir = os.path.dirname(infile)
print(outdir)
else:
outdir = obs.out_path
sname=os.path.basename(infile)
sname=os.path.splitext(sname)[0]
# Generate outfile name
outfile = outdir + '/'+sname+f'_barycorr.fits'
bary_sh = outdir+'/run_bary_'+sname+'.sh'
with open(bary_sh, 'w') as f:
f.write(f'barycorr infile={infile} clobber=yes ')
f.write(f'outfile={outfile} orbitfiles={attorb} ')
f.write(f'ra={obs.source_position.ra.deg} dec={obs.source_position.dec.deg} ')
os.environ['HEADASNOQUERY'] = ""
os.environ['HEADASPROMPT'] = "/dev/null"
os.chmod(bary_sh, stat.S_IRWXG+stat.S_IRWXU)
os.system(f'{bary_sh}')
return outfile
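# A minimal sketch: obs and infile are assumed to exist already;
# barycenter_events() runs the generated script itself and returns the
# barycenter-corrected event file:
def _example_barycenter_events(obs, infile):
    outfile = barycenter_events(obs, infile, mod='A')
    print(outfile)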
def apply_gti(infile, gtifile, clobber=True, outpath=False):
'''
Spawn an xselect instance that produces a new event file screened using GTI file
Parameters
----------
infile: str
Full path to the event file that you want to process
regfile: str
Full path to a ds9 region file (in sky coordinates) to be used to filter
the events.
Other Parameters
----------------
clobber: boolean, optional, default=True
Overwrite existing files?
outpath: str, optional, default=os.path.dirname(infile)
Set the destination for output. Defaults to same location as infile.
Return
-------
outfile: str
The full path to the output image.
'''
# Make sure environment is set up properly
_check_environment()
# Check if input file exists:
try:
with open(infile) as f:
pass
except IOError:
raise IOError("apply_gti: File does not exist %s" % (infile))
try:
with open(gtifile) as f:
pass
except IOError:
raise IOError("apply_gti: File does not exist %s" % (gtifile))
if not outpath:
outdir=os.path.dirname(infile)
else:
outdir=outpath
# Trim the filename:
sname=os.path.basename(infile)
if sname.endswith('.gz'):
sname = os.path.splitext(sname)[0]
sname = os.path.splitext(sname)[0]
rshort = os.path.basename(gtifile)
rname = os.path.splitext(rshort)[0]
# Generate outfile name
outfile = outdir + '/'+sname+f'_{rname}.evt'
if os.path.exists(outfile) and not clobber:
warnings.warn('apply_gti: %s exists, use clobber=True to regenerate' % (outfile))
else:
os.system("rm "+outfile)
xsel_file = _make_xselect_commands_apply_gti(infile, outfile, gtifile)
os.system("xselect @"+xsel_file)
os.system("rm -r -f "+xsel_file)
return outfile
def _make_xselect_commands_apply_gti(infile, outfile, gtifile):
'''
Helper script to generate the xselect commands to extract events from
a given sky region.
'''
import glob
for oldfile in glob.glob("session1*"):
os.system(f"rm {oldfile}")
xsel=open("xsel.xco","w")
xsel.write("session1\n")
xsel.write("read events \n")
evdir=os.path.dirname(infile)
xsel.write(f'{evdir} \n ' )
evfile = os.path.basename(infile)
xsel.write(f'{evfile} \n ')
xsel.write('yes \n')
xsel.write(f'filter time \n')
xsel.write('file \n')
xsel.write(f'{gtifile}\n')
xsel.write('extract events\n')
xsel.write("save events\n")
xsel.write("%s \n" % outfile)
xsel.write('n \n')
xsel.write('exit\n')
xsel.write('n \n')
xsel.close()
return 'xsel.xco'
def _make_xselect_commands_sky_evts(infile, outfile, regfile):
'''
Helper script to generate the xselect commands to extract events from
a given sky region.
'''
import glob
for oldfile in glob.glob("session1*"):
os.system(f"rm {oldfile}")
xsel=open("xsel.xco","w")
xsel.write("session1\n")
xsel.write("read events \n")
evdir=os.path.dirname(infile)
xsel.write(f'{evdir} \n ' )
evfile = os.path.basename(infile)
xsel.write(f'{evfile} \n ')
xsel.write('yes \n')
xsel.write(f'filter region {regfile} \n')
xsel.write("extract events\n")
xsel.write("save events\n")
xsel.write("%s \n" % outfile)
xsel.write('n \n')
xsel.write('exit\n')
xsel.write('n \n')
xsel.close()
return 'xsel.xco'
def _make_xselect_commands(infile, outfile, elow, ehigh, usrgti=False):
'''
Helper script to generate the xselect commands to make an image in a given NuSTAR range
'''
xsel=open("xsel.xco","w")
xsel.write("session1\n")
xsel.write("read events \n")
evdir=os.path.dirname(infile)
xsel.write(f'{evdir} \n ' )
evfile = os.path.basename(infile)
xsel.write(f'{evfile} \n ')
xsel.write('yes \n')
pi_low = energy_to_chan(elow)
pi_high = energy_to_chan(ehigh)
if usrgti is not False:
xsel.write(f'filter time \n')
xsel.write('file \n')
xsel.write(f'{usrgti}\n')
xsel.write('extract events\n')
xsel.write('filter pha_cutoff {} {} \n'.format(pi_low, pi_high))
xsel.write('set xybinsize 1\n')
xsel.write("extract image\n")
xsel.write("save image\n")
xsel.write("%s \n" % outfile)
xsel.write('exit\n')
xsel.write('n \n')
xsel.close()
return 'xsel.xco'
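# For orientation, the pha_cutoff limits above come from energy_to_chan();
# assuming the standard NuSTAR gain relation PI = (E_keV - 1.6) / 0.04, a
# 3-20 keV band maps to channels 35-460:
def _example_energy_to_chan():
    print(energy_to_chan(3), energy_to_chan(20))  # expected: 35 460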
def _check_environment():
if not (("CALDB" in os.environ) and ("HEADAS" in os.environ)):
raise IOError("Environment variables $CALDB and $HEADAS not set")
###
###
###
###
###
###
# From here down are DET1 methods
###
###
###
###
###
###
def make_det1_image(infile, elow = 3, ehigh = 20, clobber=True, outpath=False):
'''
Spawn an xselect instance that produces a DET1 image in the energy range.
Parameters
----------
infile: str
Full path to the file that you want to process
elow: float
Low-energy band for the image
ehigh: float
High-energy band for the image
Other Parameters
----------------
clobber: boolean, optional, default=True
Overwrite existing files?
outpath: str, optional, default=os.path.dirname(infile)
Set the destination for output. Defaults to same location as infile.
Return
-------
outfile: str
The full path to the output image.
'''
# Make sure environment is set up properly
_check_environment()
# Check if input file exists:
try:
with open(infile) as f:
pass
except IOError:
raise IOError("make_image: File does not exist %s" % (infile))
if not outpath:
outdir=os.path.dirname(infile)
else:
outdir=outpath
# Trim the filename:
sname=os.path.basename(infile)
if sname.endswith('.gz'):
sname = os.path.splitext(sname)[0]
sname = os.path.splitext(sname)[0]
# Generate outfile name
outfile = outdir + '/'+sname+f'_{elow}to{ehigh}keV_det1.fits'
if os.path.exists(outfile) and not clobber:
warnings.warn('make_image: %s exists, use clobber=True to regenerate' % (outfile))
else:
os.system("rm "+outfile)
xsel_file = _make_xselect_commands_det1(infile, outfile, elow, ehigh)
os.system("xselect @"+xsel_file)
os.system("rm -r -f "+xsel_file)
return outfile
def extract_det1_events(infile, regfile, clobber=True, outpath=False):
'''
Spawn an xselect instance that produces a new event file screened using a det1 region
file.
Parameters
----------
infile: str
Full path to the event file that you want to process
regfile: str
Full path to a ds9 region file (in physical coordinates) to be used to filter
the events.
Other Parameters
----------------
clobber: boolean, optional, default=True
Overwrite existing files?
outpath: str, optional, default=os.path.dirname(infile)
Set the destination for output. Defaults to same location as infile.
Return
-------
outfile: str
The full path to the output image.
'''
# Make sure environment is set up properly
_check_environment()
# Make sure region file is correctly formatted
validate_det1_region(regfile)
# Check if input file exists:
try:
with open(infile) as f:
pass
except IOError:
raise IOError("extract_det1_events: File does not exist %s" % (infile))
try:
with open(regfile) as f:
pass
except IOError:
raise IOError("extract_det1_events: File does not exist %s" % (regfile))
if not outpath:
outdir=os.path.dirname(infile)
else:
outdir=outpath
# Trim the filename:
sname=os.path.basename(infile)
if sname.endswith('.gz'):
sname = os.path.splitext(sname)[0]
sname = os.path.splitext(sname)[0]
rshort = os.path.basename(regfile)
rname = os.path.splitext(rshort)[0]
# Generate outfile name
outfile = outdir + '/'+sname+f'_{rname}.evt'
if os.path.exists(outfile) and not clobber:
warnings.warn('extract_det1_events: %s exists, use clobber=True to regenerate' % (outfile))
else:
os.system("rm "+outfile)
xsel_file = _make_xselect_commands_det1_evts(infile, outfile, regfile)
os.system("xselect @"+xsel_file)
os.system("rm -r -f "+xsel_file)
return outfile
def make_det1_lightcurve(infile, mod, obs,
time_bin=100*u.s, mode='01',
elow=3, ehigh=20, stemout=False, gtifile=False):
'''
Generate a script to run nuproducts to make a lightcurve using the whole
FoV and turning off all vignetting and PSF effects. Assumes that infile
has already been filtered using extract_det1_events().
Parameters
----------
infile: str
Full path to the input event file. This should be pre-filtered by
by extract_det1_events
mod: str
'A' or 'B'
obs: nustar_gen.info.Observation
Observation meta data
Other Parameters
-------------------
elow: float, optional, default = 3 keV
Low-energy bound
ehigh: float, optional, default is 20 keV
High-energy bound
mode: str, optional, default is '01'
Optional. Used to specify stemout if you're doing mode06 analysis and want
to specify output names that are more complicated.
gtifile: str
Path to a GTI file. If this is set, then this is passed to nuproducts.
stemout: str, optional
Use the specified stemout string when calling nuproducts. Otherwise
uses the default value.
'''
from astropy.io.fits import getheader
# Make sure environment is set up properly
_check_environment()
# Check to see that all files exist:
assert os.path.isfile(infile), 'make_det1_lightcurve: infile does not exist!'
# evdir = os.path.dirname(infile)
evdir = obs.evdir
seqid = obs.seqid
# seqid = os.path.basename(os.path.dirname(evdir))
outdir = obs.out_path
# if outpath is None:
# outdir = evdir
# else:
# outdir = outpath
hdr = getheader(infile)
ra = hdr['RA_OBJ']
dec = hdr['DEC_OBJ']
time_bin = int((time_bin.to(u.s)).value)
if stemout is False:
stemout = f'nu{seqid}{mod}{mode}_full_FoV_{elow}to{ehigh}_{time_bin}s'
lc_script = f'{outdir}/rundet1lc_{stemout}.sh'
pi_low = energy_to_chan(elow)
pi_high = energy_to_chan(ehigh)
with open(lc_script, 'w') as f:
f.write('nuproducts phafile=NONE bkgphafile=NONE imagefile=NONE ')
f.write(f'infile={infile} ')
f.write('runmkarf=no runmkrmf=no ')
f.write(f'indir={evdir} outdir={outdir} instrument=FPM{mod} ')
f.write(f'steminputs=nu{seqid} stemout={stemout} ')
f.write(f'pilow={pi_low} pihigh={pi_high} ')
f.write(f'bkgextract=no ')
f.write(f'binsize={time_bin} ')
f.write(f'srcra={ra} srcdec={dec} srcregionfile=DEFAULT srcradius=299 ')
# Turn off all of the time-dependent corrections for the pointing here
f.write(f'lcpsfflag=no lcexpoflag=no lcvignflag=no ')
if (gtifile != False):
f.write(f'usrgtifile={gtifile} ')
f.write('clobber=yes')
os.chmod(lc_script, stat.S_IRWXG+stat.S_IRWXU)
return lc_script
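# A minimal sketch: the event file is assumed to have been pre-filtered with
# extract_det1_events(), and obs is an existing nustar_gen.info.Observation:
def _example_make_det1_lightcurve(obs, filtered_evt):
    script = make_det1_lightcurve(filtered_evt, 'A', obs,
                                  time_bin=60 * u.s, elow=3, ehigh=10)
    os.system(script)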
def make_det1_spectra(infile, mod, obs,
stemout=False, gtifile=False):
'''
Generate a script to run nuproducts to extract a source
spectrum along with the associated RMF.
Assumes that infile has already been filtered using extract_det1_events().
Always runs numkrmf, never runs numkarf. Never extract background.
Parameters
----------
infile: str
Full path to the input event file. This should be pre-filtered by
by extract_det1_events
mod: str
'A' or 'B'
obs: nustar_gen.info.Observation
Observation meta data
Other Parameters
-------------------
stemout: str
Optional. Use the specified stemout string when calling nuproducts. Otherwise
uses the default value.
gtifile: str
Path to a GTI file. If this is set, then this is passed to nuproducts.
**NOTE** As of now proper treatment of this being barycenter corrected (or not)
is not supported. If you're doing pulsar analysis, please write your own version.
'''
# from astropy.io.fits import getheader
from os.path import basename
# Make sure environment is set up properly
_check_environment()
# Check to see that all files exist:
assert os.path.isfile(infile), f'make_det1_spectra: {infile} does not exist!'
# assert os.path.isfile(src_reg), 'make_det1_spectra: src_reg does not exist!'
bkgextract='no'
evdir = obs.evdir
seqid = obs.seqid
outdir = obs.out_path
# Construct the output file name:
# hdr = getheader(infile)
ra =obs.source_position.ra.deg
dec = obs.source_position.dec.deg
# if outpath == 'None':
# outdir = evdir
# else:
# outdir = outpath
# try:
# os.makedirs(outdir)
# except FileExistsError:
# # directory already exists
# pass
# stemout = f'nu{seqid}{mod}{mode}_{reg_base}_det1'
# Use the default stemout unless this is set
if stemout is False:
stemout = basename(infile).split('.')[0]
lc_script = outdir+f'/rundet1spec_{stemout}.sh'
with open(lc_script, 'w') as f:
f.write('nuproducts imagefile=NONE lcfile=NONE bkglcfile=NONE ')
f.write(f'infile={infile} ')
f.write('runmkarf=no runmkrmf=yes ')
f.write(f'indir={evdir} outdir={outdir} instrument=FPM{mod} ')
f.write(f'steminputs=nu{seqid} stemout={stemout} ')
f.write(f'srcra={ra} srcdec={dec} srcregionfile=DEFAULT srcradius=299 ')
if (gtifile != False):
f.write(f'usrgtifile={gtifile} ')
f.write(f'runbackscale=no ')
f.write(f'bkgextract=no ')
f.write('clobber=yes')
os.chmod(lc_script, stat.S_IRWXG+stat.S_IRWXU)
return lc_script
def _make_xselect_commands_det1_evts(infile, outfile, regfile):
'''
Helper script to generate the xselect commands to extract events from
a given region.
'''
import glob
for oldfile in glob.glob("session1*"):
os.system(f"rm {oldfile}")
xsel=open("xsel.xco","w")
xsel.write("session1\n")
xsel.write("read events \n")
evdir=os.path.dirname(infile)
xsel.write(f'{evdir} \n ' )
evfile = os.path.basename(infile)
xsel.write(f'{evfile} \n ')
xsel.write('yes \n')
xsel.write('set xyname\n')
xsel.write('DET1X\n')
xsel.write('DET1Y\n')
xsel.write(f'filter region {regfile} \n')
xsel.write("extract events\n")
xsel.write("save events\n")
xsel.write("%s \n" % outfile)
xsel.write('n \n')
xsel.write('exit\n')
xsel.write('n \n')
xsel.close()
return 'xsel.xco'
def _make_xselect_commands_det1(infile, outfile, elow, ehigh):
'''
Helper script to generate the xselect commands to make an image in a
given NuSTAR energy range
'''
import glob
for oldfile in glob.glob("session1*"):
os.system(f"rm {oldfile}")
xsel=open("xsel.xco","w")
xsel.write("session1\n")
xsel.write("read events \n")
evdir=os.path.dirname(infile)
xsel.write(f'{evdir} \n ' )
evfile = os.path.basename(infile)
xsel.write(f'{evfile} \n ')
xsel.write('yes \n')
xsel.write('set xyname\n')
xsel.write('DET1X\n')
xsel.write('DET1Y\n')
pi_low = energy_to_chan(elow)
pi_high = energy_to_chan(ehigh)
xsel.write('filter pha_cutoff {} {} \n'.format(pi_low, pi_high))
xsel.write('set xybinsize 1\n')
xsel.write("extract image\n")
xsel.write("save image\n")
xsel.write("%s \n" % outfile)
xsel.write('exit\n')
xsel.write('n \n')
xsel.close()
return 'xsel.xco'
|
[] |
[] |
[
"HEADASPROMPT",
"HEADASNOQUERY"
] |
[]
|
["HEADASPROMPT", "HEADASNOQUERY"]
|
python
| 2 | 0 | |
server.py
|
from http.server import ThreadingHTTPServer, HTTPServer, BaseHTTPRequestHandler
from http import cookies
import ssl
from io import BytesIO
import json
import secrets
import datetime as dt
import jwt
import copy
from jwt import encode, decode
from datetime import datetime, timedelta
from eth_account import Account
from eth_account.messages import defunct_hash_message, encode_defunct
import sha3
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_private_key, load_pem_public_key
from base64 import urlsafe_b64decode
from coinaddr import validate
import os
import re
import requests
import numpy
import time
import database
import google_email
MAILGUN_API_KEY = os.getenv('MAILGUN_API_KEY', False)
MAILGUN_EMAIL_ADDRESS = os.getenv('MAILGUN_EMAIL_ADDRESS', False)
WEBSITE_ORIGINS = os.getenv('WEBSITE_ORIGINS', False)
WEBSITE_ORIGINS = WEBSITE_ORIGINS.split(',') if WEBSITE_ORIGINS else []
PORT_NUMBER = 8080
#TODO: take object instead of address to encode with a specified time
def encode_jwt(addr):
with open('unsafe_private.pem', 'r') as file:
private_key = file.read()
private_rsa_key = load_pem_private_key(bytes(private_key, 'utf-8'), password=None, backend=default_backend())
exp = datetime.utcnow() + timedelta(days=7)
encoded_jwt = encode({'address':addr, 'exp':exp}, private_rsa_key, algorithm='RS256')
return encoded_jwt
def decode_jwt(user_jwt):
with open('public.pem', 'r') as file:
public_key = file.read()
public_rsa_key = load_pem_public_key(bytes(public_key,'utf-8'), backend=default_backend())
decoded_jwt = decode(user_jwt, public_rsa_key, algorithms='RS256')
return decoded_jwt
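# A minimal round-trip sketch (assumes the RS256 key files referenced above
# exist on disk; the address is hypothetical):
def _example_jwt_roundtrip():
    addr = '0x' + '00' * 20  # hypothetical Ethereum address
    token = encode_jwt(addr)
    claims = decode_jwt(token)
    assert claims['address'] == addr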
def isValidNoradNumber(value):
try:
int_value = int(value)
except Exception as e:
print(e)
return False
if int_value < 100000 and int_value > 0:
return True
else:
return False
def isValidEthereumAddress(addr):
check_address = validate('eth', bytes(addr, 'utf-8'))
return check_address.valid
def isValidEmailAddress(email):
regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
return re.search(regex, email)
def isValidSecret(secret):
regex = r'^([0-9]{1,10})([\/])([0-9, a-f]{32})([\/])([0-9,a-f]{160})$'
return re.search(regex, secret)
def isValidUserSetting(setting):
regex = r'^([0-9,a-z,A-Z,\',!,\s,\\,!,@,#,$,%,^,&,*,(,),[,\],{,},+,\-,=,_,|,;,\,,.,/,?]{1,255})$'
return re.search(regex, setting)
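# Quick illustrations of the validators above (inputs are hypothetical):
def _example_validators():
    print(bool(isValidNoradNumber('25544')))     # True: integer in 1..99999
    print(bool(isValidNoradNumber('100000')))    # False: out of range
    print(bool(isValidEmailAddress('a@b.com')))  # True: regex match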
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
# Read config and set up database connection
CONFIG = os.path.abspath("../../trusat-config.yaml")
self.db = database.Database(CONFIG)
super().__init__(request, client_address, server)
def send_500(self, message='', explain='', request_origin='*'):
self.send_response(500, message=message)
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Content-type', 'application/json')
self.end_headers()
body_bytes = {"error": message + ': ' + explain}
self.wfile.write(bytes(json.dumps(body_bytes), 'utf-8'))
self.db.clean()
def send_400(self, message='', explain='', request_origin='*'):
self.send_response(400, message=message)
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Content-type', 'application/json')
self.end_headers()
body_bytes = {"error": message + ': ' + explain}
self.wfile.write(bytes(json.dumps(body_bytes), 'utf-8'))
self.db.clean()
def send_401(self, message='', explain='', request_origin='*'):
self.send_response(401, message=message)
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Content-type', 'application/json')
self.end_headers()
body_bytes = {"error": message + ': ' + explain}
self.wfile.write(bytes(json.dumps(body_bytes), 'utf-8'))
self.db.clean()
def send_404(self, message='', explain='', request_origin='*'):
self.send_response(404, message=message)
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
self.db.clean()
def send_204(self, request_origin='*'):
self.send_response(204)
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
def send_200_JSON(self, body_data, request_origin='*'):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
try:
body_bytes = bytes(body_data, 'utf-8')
except Exception as e:
print(e)
body_bytes = b'[]'
self.wfile.write(body_bytes)
def send_200_JSON2(self, body_data, request_origin='*'):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
self.wfile.write(body_data)
def send_200_JSON_cache(self, body_data, request_origin='*'):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Cache-Control', 'max-age=300')
self.end_headers()
try:
body_bytes = bytes(body_data, 'utf-8')
except Exception as e:
print(e)
body_bytes = b'[]'
self.wfile.write(body_bytes)
def send_200_text(self, body_data, request_origin='*'):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
try:
body_bytes = bytes(body_data, 'utf-8')
except Exception as e:
print(e)
body_bytes = b''
self.wfile.write(body_bytes)
def send_200_text_cache(self, body_data, request_origin='*'):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Cache-Control', 'max-age=300')
self.end_headers()
try:
body_bytes = bytes(body_data, 'utf-8')
except Exception as e:
print(e)
body_bytes = b''
self.wfile.write(body_bytes)
def do_OPTIONS(self):
try:
request_origin = self.headers.get("Origin")
if request_origin not in WEBSITE_ORIGINS:
request_origin = '*'
print(request_origin)
except Exception as e:
print(e)
request_origin = False
try:
path = self.path.split('?')[0]
except Exception as e:
print(e)
path = self.path
if path == "/catalog/priorities" or \
path == "/catalog/undisclosed" or \
path == "/catalog/debris" or \
path == "/catalog/latest" or \
path == "/catalog/all" or \
path == "/tle/trusat_all.txt" or \
path == "/tle/trusat_priorities.txt" or \
path == "/tle/trusat_high_confidence.txt" or \
path == "/astriagraph" or \
path == "/profile" or \
path == "/object/influence" or \
path == "/object/info" or \
path == "/object/history" or \
path == "/object/userSightings" or \
path == "/tle/object" or \
path == "/findObject" or \
path == "/cookieMonster" or \
path == "/errorTest":
self.send_response(200)
self.send_header('Accept', 'GET')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Headers', 'Cache-Control')
self.end_headers()
elif path == "/getNonce" or \
path == "/signup" or \
path == "/login" or \
path == "/editProfile" or \
path == "/claimAccount" or \
path == "/verifyClaimAccount" or \
path == "/getObservationStations" or \
path == "/generateStation" or \
path == "/submitObservation":
self.send_response(200)
self.send_header('Accept', 'POST')
self.send_header('Access-Control-Allow-Headers', '*')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.end_headers()
else:
self.send_404(request_origin=request_origin)
def do_GET(self):
try:
request_origin = self.headers.get("Origin")
if request_origin not in WEBSITE_ORIGINS:
request_origin = '*'
print(request_origin)
except Exception as e:
print(e)
request_origin = False
try:
user_cookie = cookies.SimpleCookie(self.headers.get('Cookie'))
cookie_jwt = user_cookie['jwt'].value
print("COOKIES!")
print(user_cookie)
except Exception as e:
print(e)
user_cookie = False
cookie_jwt = False
try:
path = self.path.split('?')[0]
parameters = self.path.split('?')[1]
except Exception as e:
print(e)
path = self.path
parameters = ''
try:
parameters = parameters.split('&')
parameters_map = {}
for param in parameters:
parameters_map[param.split('=')[0]] = param.split('=')[1]
except Exception as e:
print(e)
parameters_map = {}
if path == "/catalog/priorities":
try:
json_object = self.db.selectCatalog_Priorities_JSON()
except Exception as e:
print(e)
self.send_500(message='Could not get priorities', explain='Priorities query failed', request_origin=request_origin)
return
if json_object is not False:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == "/catalog/undisclosed":
try:
json_object = self.db.selectCatalog_Undisclosed_JSON()
except Exception as e:
print(e)
self.send_500(message='Could not get undisclosed', explain='Undisclosed query failed', request_origin=request_origin)
return
if json_object is not False:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == "/catalog/debris":
try:
json_object = self.db.selectCatalog_Debris_JSON()
except Exception as e:
print(e)
self.send_500(message='Could not get debris', explain='Debris query failed', request_origin=request_origin)
return
if json_object is not False:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == "/catalog/latest":
try:
json_object = self.db.selectCatalog_Latest_JSON()
except Exception as e:
print(e)
self.send_500(message='Could not get latest', explain='Latest query failed', request_origin=request_origin)
return
if json_object is not False:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == "/catalog/all":
try:
json_object = self.db.selectCatalog_All_JSON()
except Exception as e:
print(e)
self.send_500(message='Could not get all', explain='All query failed', request_origin=request_origin)
return
if json_object is not False:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == "/tle/trusat_all.txt":
try:
two_line_elements = self.db.selectTLE_all()
except Exception as e:
print(e)
self.send_500(message='Could not get TLEs', explain='TLE query failed', request_origin=request_origin)
return
if two_line_elements is not False:
self.send_200_text_cache(two_line_elements, request_origin=request_origin)
else:
self.send_200_text_cache('', request_origin=request_origin)
return
elif path == "/tle/trusat_priorities.txt":
try:
two_line_elements = self.db.selectTLE_priorities()
except Exception as e:
print(e)
self.send_500(message='Could not get TLEs', explain='TLE query failed', request_origin=request_origin)
return
if two_line_elements is not False:
self.send_200_text_cache(two_line_elements, request_origin=request_origin)
else:
self.send_200_text_cache('', request_origin=request_origin)
return
elif path == "/tle/trusat_high_confidence.txt":
try:
two_line_elements = self.db.selectTLE_high_confidence()
except Exception as e:
print(e)
self.send_500(message='Could not get TLEs', explain='TLE query failed', request_origin=request_origin)
return
if two_line_elements is not False:
self.send_200_text_cache(two_line_elements, request_origin=request_origin)
else:
self.send_200_text_cache('', request_origin=request_origin)
return
elif path == "/astriagraph":
try:
tles_json = self.db.selectTLE_Astriagraph()
except Exception as e:
print(e)
self.send_500(message='Could not get TLEs', explain='TLE query failed', request_origin=request_origin)
return
if tles_json is not False:
self.send_200_text_cache(tles_json, request_origin=request_origin)
else:
self.send_200_text_cache('', request_origin=request_origin)
return
elif path == "/profile":
jwt_user_addr = ''
try:
user_addr = parameters_map["address"]
except Exception as e:
print(e)
self.send_400(message='Missing address', explain='Address is missing from the parameters', request_origin=request_origin)
return
if isValidEthereumAddress(user_addr) is False:
self.send_400(message='Address is invalid', explain='Address is not an Ethereum Address', request_origin=request_origin)
return
try:
user_profile_json = self.db.selectProfileInfo_JSON(user_addr)
objects_observed_json = self.db.selectUserObjectsObserved_JSON(user_addr)
observation_history_json = self.db.selectUserObservationHistory_JSON(user_addr)
user_profile_json["objects_observed"] = objects_observed_json
user_profile_json["observation_history"] = observation_history_json
user_profile_json["observation_stations"] = []
except Exception as e:
print(e)
self.send_500(message='Could not retrieve user information', explain='User information is missing in the database', request_origin=request_origin)
return
try:
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = parameters_map["jwt"]
decoded_jwt = decode_jwt(user_jwt)
jwt_user_addr = decoded_jwt["address"]
except Exception as e:
print(e)
pass
if isValidEthereumAddress(jwt_user_addr) is False:
self.send_400(message='Invalid Ethereum address', explain='Ethereum address pulled from user JWT is not valid', request_origin=request_origin)
return
if jwt_user_addr.lower() == user_addr.lower():
try:
observation_station_numbers = self.db.selectUserStationNumbers_JSON(user_addr)
for station in observation_station_numbers:
user_profile_json["observation_stations"].append(station)
except Exception as e:
print(e)
self.send_500(message='Observation information could not be retrieved', explain='Error in query to retrieve observer station information', request_origin=request_origin)
return
for k,v in user_profile_json.items():
if v is None or v == 'NULL':
user_profile_json[k] = ""
user_profile = json.dumps(user_profile_json)
self.send_200_JSON_cache(user_profile, request_origin=request_origin)
elif path == '/object/influence':
try:
norad_number = parameters_map['norad_number']
except Exception as e:
print(e)
self.send_400(message='Norad number is missing from parameters', explain='Did not receive norad_number in the URI', request_origin=request_origin)
return
if isValidNoradNumber(norad_number) is False:
self.send_400(message='Norad number is not valid', explain='Norad number is not a number from 1-99999', request_origin=request_origin)
return
try:
json_object = self.db.selectObjectInfluence_JSON(norad_number)
except Exception as e:
print(e)
            self.send_500(message='Object influence could not be retrieved', explain='Query for object influence failed', request_origin=request_origin)
return
if json_object:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON(json.dumps({}), request_origin=request_origin)
return
elif path == '/object/info':
try:
norad_number = parameters_map['norad_number']
except Exception as e:
print(e)
            self.send_400(message='Norad number is missing from parameters', explain='Did not receive norad_number in the URI', request_origin=request_origin)
return
if isValidNoradNumber(norad_number) is False:
self.send_400(message='Norad number is not valid', explain='Norad number is not a number from 1-99999', request_origin=request_origin)
return
try:
json_object = self.db.selectObjectInfo_JSON(norad_number)
except Exception as e:
print(e)
self.send_500(message='Object info could not be retrieved', explain='Query for object info failed', request_origin=request_origin)
return
if json_object:
self.send_200_JSON_cache(json_object, request_origin=request_origin)
else:
self.send_200_JSON_cache(json.dumps({}), request_origin=request_origin)
return
elif path == '/object/history':
try:
norad_number = parameters_map['norad_number']
year = parameters_map["year"]
int_year = int(year)
except Exception as e:
print(e)
self.send_400(message='Missing norad number and/or year', explain='Parameters need a valid year and norad number', request_origin=request_origin)
return
if (isValidNoradNumber(norad_number) is False or
int_year < 1957 or
int_year > datetime.now().year):
self.send_400(message='Year is out of range or norad number is invalid', explain='year is less than 1957, greater than the current year, or norad number is not valid', request_origin=request_origin)
return
try:
real_entry = self.db.selectObjectHistoryByMonth_JSON(norad_number, year)
year_response = {
"December": [],
"November": [],
"October": [],
"September": [],
"August": [],
"July": [],
"June": [],
"May": [],
"April": [],
"March": [],
"February": [],
"January": []
}
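            # Bucket each observation into its calendar month and record its day of month.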
for items in real_entry:
timestamp = datetime.fromtimestamp(float(items["observation_time"]))
month_string = timestamp.strftime("%B")
date = timestamp.day
items["observation_date"] = date
year_response[month_string].append(items)
response_body = json.dumps(year_response)
self.send_200_JSON_cache(response_body, request_origin=request_origin)
except Exception as e:
print(e)
            self.send_500(message='Could not get history', explain='Object history query failed', request_origin=request_origin)
return
elif path == '/object/userSightings':
response_body = []
try:
norad_number = parameters_map['norad_number']
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = parameters_map['jwt']
decoded_jwt = decode_jwt(user_jwt)
public_address = decoded_jwt["address"]
except Exception as e:
print(e)
self.send_400(message='Parameter(s) missing', explain='norad_number, jwt, address', request_origin=request_origin)
return
if (isValidNoradNumber(norad_number) is False or
            isValidEthereumAddress(public_address) is False):
self.send_400(message='Invalid norad number or Ethereum address', explain='Not proper format', request_origin=request_origin)
return
try:
response_body = self.db.selectObjectUserSightings_JSON(norad_number, public_address)
response_body = json.dumps(response_body)
self.send_200_JSON_cache(response_body, request_origin=request_origin)
except Exception as e:
print(e)
self.send_500(message='userSighting failed', explain='Query for userSightings was not successful', request_origin=request_origin)
return
elif path == "/tle/object":
try:
norad_number = parameters_map["norad_number"]
except Exception as e:
print(e)
self.send_400(message='Missing Norad number', explain='Norad number parameter missing', request_origin=request_origin)
return
if isValidNoradNumber(norad_number) is False:
self.send_400(message='Invalid Norad number', explain='Norad number is not valid', request_origin=request_origin)
return
try:
two_line_elements = self.db.selectTLE_single(norad_number)
except Exception as e:
print(e)
self.send_500(message='Could not get TLE', explain='Query failed to get TLE', request_origin=request_origin)
return
if two_line_elements:
self.send_200_text_cache(two_line_elements, request_origin=request_origin)
else:
self.send_200_text_cache("", request_origin=request_origin)
return
elif path == "/findObject":
try:
partial_string = parameters_map["objectName"]
except Exception as e:
print(e)
self.send_400(message='Object name missing', explain='Object name parameter missing', request_origin=request_origin)
return
try:
objects = self.db.selectFindObject(partial_string)
self.send_200_text_cache(objects, request_origin=request_origin)
except Exception as e:
                self.send_500(message='Could not find object', explain='Query failed to find object', request_origin=request_origin)
print(e)
return
elif path == '/heartbeat':
self.send_204(request_origin=request_origin)
return
elif path == '/errorTest':
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
elif path == '/cookieMonster':
try:
ck = cookies.SimpleCookie(self.headers.get('Cookie'))
print('COOKIES')
print(ck['jwt'].value)
            except Exception:
                print('no cookies :(')
C = cookies.SimpleCookie()
        cookie_exp = time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.gmtime(time.time() + 518400))
jwt_cookie = 'jwt=\"' + 'test' + '\"; Max-Age=6048000; Secure; HttpOnly;'
C.load(jwt_cookie)
print(cookie_exp)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
        self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header("Set-Cookie", C.output(header='', sep=''))
self.end_headers()
return
else:
self.send_response(404)
self.db.clean()
def do_POST(self):
try:
request_origin = self.headers.get("Origin")
            if request_origin not in WEBSITE_ORIGINS:
                request_origin = '*'
            print(request_origin)
except Exception as e:
print(e)
request_origin = False
response_body = b""
signed_public_key = '0'
try:
user_cookie = cookies.SimpleCookie(self.headers.get('Cookie'))
cookie_jwt = user_cookie['jwt'].value
print("COOKIE!")
print(user_cookie)
except Exception as e:
print(e)
user_cookie = False
cookie_jwt = False
try:
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
json_body = json.loads(body)
except Exception as e:
print(e)
self.send_400(message='Body improperly formatted', explain='Body is not properly formatted', request_origin=request_origin)
return
### GET NONCE ENDPOINT ###
if self.path == "/getNonce":
try:
addr = json_body["address"]
except Exception as e:
print(e)
self.send_400(message='Ethereum address missing', explain='Ethereum address is not a parameter', request_origin=request_origin)
return
if isValidEthereumAddress(addr) is False:
self.send_400(message='Invalid Ethereum address', explain='Ethereum address is not valid', request_origin=request_origin)
return
try:
email = json_body["email"]
                if isValidEmailAddress(email) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
results = self.db.selectObserverAddressFromEmail(email)
if len(results) == 42:
self.send_200_JSON(json.dumps({}), request_origin=request_origin)
return
except Exception as e:
print(e)
pass
try:
public_address_count = self.db.getObserverCountByID(public_address=addr)
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
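            # Issue a fresh 256-bit nonce for this address; /login and /signup
            # later recover the signer of this nonce and compare it with the
            # claimed address to prove control of the key.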
random_number = str(secrets.randbits(256))
response_message = '{"nonce":\"%s\"}' % random_number
            if public_address_count[0] is None or public_address_count[0] == 0:
# New User
try:
self.db.addObserver(addr, "NULL", 0, "NULL")
self.db.updateObserverNonceBytes(nonce=random_number, public_address=addr)
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
elif public_address_count[0] >= 1:
# Old User
try:
self.db.updateObserverNonceBytes(nonce=random_number, public_address=addr)
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
self.send_200_JSON(response_message, request_origin=request_origin)
elif self.path == "/signup":
try:
addr = json_body["address"]
if isValidEthereumAddress(addr) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
old_nonce = self.db.getObserverNonceBytes(addr)
email = json_body["email"]
signed_message = json_body["signedMessage"]
payload = json_body["secret"]
except Exception as e:
print(e)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
try:
if (isValidEmailAddress(email) is False or
isValidSecret(payload) is False):
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
nonce = old_nonce.encode('utf-8')
self.db.updateObserverNonceBytes(nonce='NULL', public_address=addr)
message_hash = sha3.keccak_256(nonce).hexdigest()
message_hash = encode_defunct(hexstr=message_hash)
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
try:
signed_public_key = Account.recover_message(message_hash, signature=signed_message)
except Exception as e:
print(e)
print('message could not be checked')
try:
if signed_public_key.lower() == addr.lower():
email_from_addr = self.db.selectEmailFromObserverAddress(addr)
                if email_from_addr is None or email_from_addr == '' or email_from_addr == b'NULL':
                    if email not in (None, 'null', 'NULL', ''):
try:
self.db.updateObserverEmail(email, addr)
message_text = 'Save this email: TruSat account recovery info for ' + email + '\n\n' + \
'To log into TruSat, you\'ll need your password AND this secret code:\n\n' + payload + \
'\n\nThis email is the only time we can send you this code. TruSat cannot reset your password for you. Please save this email forever and make a note of the password you used.\n\n' + \
'Login here: trusat.org/login\n\n' + \
'Why do we do it this way? Read more (trusat.org/faq)\n\n' + \
'Questions? Please email: [email protected]'
data = {"from": "TruSat Help <" + MAILGUN_EMAIL_ADDRESS + ">",
"to": [email],
"subject": "TruSat - Save this email: Recovery Info",
"text": message_text}
response = requests.post(
"https://api.mailgun.net/v3/beta.trusat.org/messages",
auth=("api", MAILGUN_API_KEY),
data=data
)
if response.status_code != 200:
print(response)
print("Email failed to send.")
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
self.send_200_JSON(json.dumps({'result': True}), request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
except Exception as e:
print(e)
else:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
### LOGIN ENDPOINT ###
elif self.path == "/login":
try:
addr = json_body["address"]
old_nonce = self.db.getObserverNonceBytes(addr)
signed_message = json_body["signedMessage"]
except Exception as e:
print(e)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
if isValidEthereumAddress(addr) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
nonce = old_nonce.encode('utf-8')
self.db.updateObserverNonceBytes(nonce='NULL', public_address=addr)
message_hash = sha3.keccak_256(nonce).hexdigest()
message_hash = encode_defunct(hexstr=message_hash)
try:
signed_public_key = Account.recover_message(message_hash, signature=signed_message)
except Exception as e:
print(e)
print('message could not be checked')
try:
email = json_body["email"]
secret = json_body["secret"]
except Exception as e:
print(e)
email = None
secret = None
        if ((email is not None and isValidEmailAddress(email) is False) or
                (secret is not None and isValidSecret(secret) is False)):
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
if signed_public_key.lower() == addr.lower():
email_from_addr = self.db.selectEmailFromObserverAddress(addr)
            if email_from_addr is None or email_from_addr == '' or email_from_addr == b'NULL':
                if email is not None:
try:
self.db.updateObserverEmail(email, addr)
email_status = google_email.send_email(email, secret)
                    if email_status is False:
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
self.send_200_JSON(json.dumps({'result':True}), request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
encoded_jwt = encode_jwt(addr.lower())
self.db.updateObserverJWT(encoded_jwt, '', addr)
frontend_exp = time.time() + 604800
response_message = b'{"jwt": "'
response_message += encoded_jwt
response_message += b'", "address": "' + bytes(addr.lower(), 'utf-8') + b'", "exp": ' + bytes(str(frontend_exp), 'utf-8') + b' } '
C = cookies.SimpleCookie()
jwt_cookie = 'jwt=\"' + encoded_jwt.decode('utf-8') + '\"; Max-Age=604800; Secure; HttpOnly'
C.load(jwt_cookie)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header("Set-Cookie", C.output(header='', sep=''))
self.end_headers()
self.wfile.write(response_message)
return
else:
print("Login Failed")
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
elif self.path == "/editProfile":
try:
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = json_body["jwt"]
decoded_jwt = decode_jwt(user_jwt)
public_address = decoded_jwt["address"]
observer_id = self.db.selectObserverIDFromAddress(public_address)
except Exception as e:
print(e)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
if isValidEthereumAddress(public_address) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
try:
username = json_body["username"]
if (username != "null" and
username != None and
isValidUserSetting(username)):
self.db.updateObserverUsername(username, public_address)
except Exception as e:
print("Username not being updated")
print(e)
try:
bio = json_body["bio"]
if (bio != "null" and
bio != None and
isValidUserSetting(bio)):
self.db.updateObserverBio(bio, public_address)
except Exception as e:
print("Bio not being updated")
print(e)
try:
location = json_body["location"]
if (location != "null" and
location != None and
isValidUserSetting(location)):
self.db.updateObserverLocation(location, public_address)
except Exception as e:
print("Location not being updated")
print(e)
try:
deleted_stations = json_body["deleted_stations"]
for station in deleted_stations:
result = self.db.deleteStation(station, observer_id)
if result is not True:
print("failed to delete station")
except Exception as e:
print("No stations to delete")
print(e)
try:
station_name = json_body['new_station_names']
print('STATION')
for station in station_name:
result = self.db.updateStationName(station, station_name[station], observer_id)
if result is not True:
print("failed ot update name for " + station + " " + station_name[station])
except Exception as e:
print('No station name change')
print(e)
try:
station_notes = json_body['new_station_notes']
for station in station_notes:
result = self.db.updateStationNotes(station, station_notes[station], observer_id)
if result is not True:
print("failed ot update notes for " + station + ' ' + station_notes[station])
except Exception as e:
print('No station notes change')
print(e)
self.send_200_JSON(response_body, request_origin=request_origin)
elif self.path == '/claimAccount':
try:
email = json_body['email']
except Exception as e:
print(e)
self.send_200_JSON(json.dumps({'result': False}), request_origin=request_origin)
return
if isValidEmailAddress(email) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
try:
with open('unsafe_private.pem', 'r') as file:
private_key = file.read()
private_rsa_key = load_pem_private_key(bytes(private_key, 'utf-8'), password=None, backend=default_backend())
results = self.db.selectObserverAddressFromEmail(email)
if results is not None:
results = results.decode('utf-8')
else:
self.send_200_JSON(json.dumps({'result': False}), request_origin=request_origin)
return
old_password = self.db.selectObserverPasswordFromAddress(results)
if old_password is not None:
old_password = old_password.decode('utf-8')
try:
if decode_jwt(old_password):
self.send_200_JSON(json.dumps({'result': True}), request_origin=request_origin)
return
except:
print('User already claimed account.')
number = str(secrets.randbits(64))
jwt_payload = {
'email': email,
'secret': number,
'exp': datetime.utcnow() + timedelta(minutes=30)
}
encoded_jwt = encode(jwt_payload, private_rsa_key, algorithm='RS256')
self.db.updateObserverPassword(encoded_jwt.decode('utf-8'), results)
message_text = 'Please use the following link to verify your ownership of the following email ' + \
                    email + '\n\nhttps://trusat.org/claim/' + encoded_jwt.decode('utf-8') + '\nThis link will expire in 30 minutes.' + \
'\n\nIf you did not request recovery of your account please contact us at:\[email protected]\n'
data = {"from": "TruSat Help <" + MAILGUN_EMAIL_ADDRESS + ">",
"to": [email],
"subject": "TruSat - Recover Account",
"text": message_text}
response = requests.post(
"https://api.mailgun.net/v3/beta.trusat.org/messages",
auth=("api", MAILGUN_API_KEY),
data=data
)
if response.status_code != 200:
print(response)
print("Email failed to send.")
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
else:
self.send_200_JSON(json.dumps({'result': True}), request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
self.send_200_JSON(json.dumps({'result': False}), request_origin=request_origin)
elif self.path == "/verifyClaimAccount":
try:
message_text = json_body["secret"]
address = json_body["address"]
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = json_body["jwt"]
except Exception as e:
print(e)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
if (isValidEthereumAddress(address) is False or
isValidSecret(message_text) is False):
print("Ëthereum address:")
print(address)
print("Secret:")
print(message_text)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
#Lookup number and old address
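        # The claim JWT issued by /claimAccount carries the user's email and a
        # one-time secret; we recover the old address it was stored under, move
        # the account to the requested address, and email the recovery secret.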
try:
decoded_jwt = decode_jwt(user_jwt)
secret = decoded_jwt["secret"]
to = decoded_jwt["email"]
            old_address = self.db.selectObserverAddressFromPassword(user_jwt)
            if old_address is None:
                self.send_400(message='message', explain='explanation', request_origin=request_origin)
                return
            old_address = old_address.decode('utf-8')
#replace address
encoded_jwt = encode_jwt(address)
self.db.updateObserverAddress(address, old_address)
message_text = 'Save this email: TruSat account recovery info for ' + to + '\n\n' + \
'To log into TruSat, you\'ll need your password AND this secret code:\n\n' + message_text + \
'\n\nThis email is the only time we can send you this code. TruSat cannot reset your password for you. Please save this email forever and make a note of the password you used.\n\n' + \
'Login here: trusat.org/login\n\n' + \
'Why do we do it this way? Read more (trusat.org/faq)\n\n' + \
'Questions? Please email: [email protected]'
data = {"from": "TruSat Help <" + MAILGUN_EMAIL_ADDRESS + ">",
"to": [to],
"subject": "TruSat - Save this email: Recovery Info",
"text": message_text}
response = requests.post(
"https://api.mailgun.net/v3/beta.trusat.org/messages",
auth=("api", MAILGUN_API_KEY),
data=data
)
if response.status_code != 200:
print(response)
print("Email failed to send.")
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
self.db.updateObserverJWT(encoded_jwt, "", address)
frontend_exp = time.time() + 604800
response_message = b'{"jwt": "'
response_message += encoded_jwt
response_message += b'", "address": "' + bytes(address.lower(), 'utf-8') + b'", "exp": ' + bytes(str(frontend_exp), 'utf-8') + b' } '
C = cookies.SimpleCookie()
jwt_cookie = 'jwt=\"' + encoded_jwt.decode('utf-8') + '\"; Max-Age=604800; Secure; HttpOnly'
C.load(jwt_cookie)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Access-Control-Allow-Origin', request_origin)
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header("Set-Cookie", C.output(header='', sep=''))
self.end_headers()
self.wfile.write(response_message)
self.db.updateObserverPassword('NULL', address)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
elif self.path == "/submitObservation":
try:
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = json_body["jwt"]
decoded_jwt = decode_jwt(user_jwt)
user_addr = decoded_jwt["address"]
except Exception as e:
print(e)
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
if isValidEthereumAddress(user_addr) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
try:
single = json_body["single"]
except Exception as e:
print(e)
parsed_iod = []
try:
multiple = json_body["multiple"]
results = self.db.addObserverParsedIOD(multiple)
if results is not False:
(success, error_messages) = results
else:
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
success_length = {'success':success, 'error_messages':error_messages}
self.send_200_JSON(json.dumps(success_length), request_origin=request_origin)
elif self.path == "/seesat":
email_information = json_body["message"]["data"]
email_history = urlsafe_b64decode(email_information).decode('utf-8')
email_history = json.loads(email_history)
print(email_history)
print(google_email.get_email_history(email_history['historyId']))
self.send_204(request_origin=request_origin)
elif self.path == "/getObservationStations":
        try:
            if cookie_jwt is not False:
                user_jwt = cookie_jwt
            else:
                user_jwt = json_body["jwt"]
            decoded_jwt = decode_jwt(user_jwt)
            jwt_user_addr = decoded_jwt["address"]
        except Exception as e:
            print(e)
            self.send_400(message='Invalid JWT', explain='Could not decode user JWT', request_origin=request_origin)
            return
        if isValidEthereumAddress(jwt_user_addr) is False:
            self.send_400(message='Invalid Ethereum address', explain='Ethereum address pulled from user JWT is not valid', request_origin=request_origin)
            return
try:
observation_station_numbers = self.db.selectUserStationNumbers_JSON(jwt_user_addr)
self.send_200_JSON(json.dumps(observation_station_numbers), request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='Could not get station information', explain='Query to get observation stations has failed', request_origin=request_origin)
elif self.path == '/generateStation':
try:
print(cookie_jwt)
print(user_cookie)
if cookie_jwt is not False:
user_jwt = cookie_jwt
else:
user_jwt = json_body["jwt"]
decoded_jwt = decode_jwt(user_jwt)
user_addr = decoded_jwt["address"]
station_name = json_body["station"]
latitude = json_body["latitude"]
longitude = json_body["longitude"]
elevation = json_body["elevation"]
notes = json_body["notes"]
except Exception as e:
print(e)
            self.send_400(message='Missing parameter(s)', explain='One or more of the required parameters are missing from the request body', request_origin=request_origin)
return
if isValidEthereumAddress(user_addr) is False:
self.send_400(message='message', explain='explanation', request_origin=request_origin)
return
try:
user_id = self.db.selectObserverIDFromAddress(user_addr)
latest_station = self.db.selectLatestStationID()
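            # Station IDs are 'T' plus a base-36 index padded to three chars
            # (T000, T001, ...); allocation increments the latest index and
            # skips any encoding containing the easily-confused letters I or O.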
            if latest_station[0:1] == 'T':
                station_index = int(latest_station[1:], 36) + 1
                # Skip any index whose base-36 encoding contains an I or an O
                station_index_repr = numpy.base_repr(station_index, 36)
                while 'I' in station_index_repr or 'O' in station_index_repr:
                    station_index += 1
                    station_index_repr = numpy.base_repr(station_index, 36)
                station_id = 'T' + station_index_repr.rjust(3, '0')
            else:
                station_id = 'T000'
print(station_id)
station_result = self.db.addStation(station_id, user_id, latitude, longitude, elevation, station_name, notes)
            if station_result is None:
                self.send_500(message='Could not add station', explain='Query failed to add station for user', request_origin=request_origin)
                return
            if station_result is False:
                self.send_400(message='User has too many stations', explain='User has 10 or more stations', request_origin=request_origin)
                return
# get last station
# increment station
# remove O and I
# set and return
self.send_200_JSON(json.dumps({'station_id': station_id}), request_origin=request_origin)
return
except Exception as e:
print(e)
self.send_500(message='message', explain='explanation', request_origin=request_origin)
return
else:
self.send_response(404)
self.end_headers()
self.wfile.write(b'')
return
self.db.clean()
httpd = ThreadingHTTPServer(('', PORT_NUMBER), SimpleHTTPRequestHandler)
httpd.timeout = 10
if os.getenv('TRUSAT_DISABLE_HTTPS', False):
print('HTTPS disabled!')
else:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile='./privkey.pem', certfile='./fullchain.pem', server_side=True)
httpd.serve_forever()
|
[] |
[] |
[
"TRUSAT_DISABLE_HTTPS",
"WEBSITE_ORIGINS",
"MAILGUN_EMAIL_ADDRESS",
"MAILGUN_API_KEY"
] |
[]
|
["TRUSAT_DISABLE_HTTPS", "WEBSITE_ORIGINS", "MAILGUN_EMAIL_ADDRESS", "MAILGUN_API_KEY"]
|
python
| 4 | 0 | |
x/ref/lib/discovery/advertise.go
|
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package discovery
import (
"fmt"
"sync"
"time"
"v.io/v23/context"
"v.io/v23/discovery"
"v.io/v23/naming"
"v.io/v23/security"
"v.io/x/ref/lib/stats"
)
func (d *idiscovery) advertise(ctx *context.T, session sessionId, ad *discovery.Advertisement, visibility []security.BlessingPattern) (<-chan struct{}, error) {
if !ad.Id.IsValid() {
var err error
if ad.Id, err = discovery.NewAdId(); err != nil {
return nil, err
}
}
if err := validateAd(ad); err != nil {
return nil, NewErrBadAdvertisement(ctx, err)
}
adinfo := &AdInfo{Ad: *ad}
if err := encrypt(ctx, adinfo, visibility); err != nil {
return nil, err
}
hashAd(adinfo)
adinfo.TimestampNs = d.newAdTimestampNs()
ctx, cancel, err := d.addTask(ctx)
if err != nil {
return nil, err
}
id := adinfo.Ad.Id
if !d.addAd(id, session) {
cancel()
d.removeTask(ctx)
return nil, NewErrAlreadyBeingAdvertised(ctx, id)
}
subtask := &adSubtask{parent: ctx}
d.adMu.Lock()
d.adSubtasks[id] = subtask
d.adMu.Unlock()
done := make(chan struct{})
stop := func() {
d.stopAdvertising(id)
d.dirServer.unpublish(id)
d.removeAd(id)
d.removeTask(ctx)
close(done)
}
	// Lock the subtask so that directory-server endpoint updates are not
	// lost while advertising is being started.
subtask.mu.Lock()
d.dirServer.publish(adinfo)
subtask.stop, err = d.startAdvertising(ctx, adinfo)
subtask.mu.Unlock()
if err != nil {
cancel()
stop()
return nil, err
}
d.adStopTrigger.Add(stop, ctx.Done())
return done, nil
}
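// newAdTimestampNs returns the current time in nanoseconds, bumped when needed
// so that each advertisement gets a strictly increasing timestamp even if two
// are created within the same nanosecond.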
func (d *idiscovery) newAdTimestampNs() int64 {
now := time.Now()
timestampNs := now.UnixNano()
d.adMu.Lock()
if d.adTimestampNs >= timestampNs {
timestampNs = d.adTimestampNs + 1
}
d.adTimestampNs = timestampNs
d.adMu.Unlock()
return timestampNs
}
func (d *idiscovery) addAd(id discovery.AdId, session sessionId) bool {
d.adMu.Lock()
if _, exist := d.adSessions[id]; exist {
d.adMu.Unlock()
return false
}
d.adSessions[id] = session
d.adMu.Unlock()
return true
}
func (d *idiscovery) removeAd(id discovery.AdId) {
d.adMu.Lock()
delete(d.adSessions, id)
d.adMu.Unlock()
}
func (d *idiscovery) getAdSession(id discovery.AdId) sessionId {
d.adMu.Lock()
session := d.adSessions[id]
d.adMu.Unlock()
return session
}
func (d *idiscovery) startAdvertising(ctx *context.T, adinfo *AdInfo) (func(), error) {
statName := naming.Join(d.statsPrefix, "ad", adinfo.Ad.Id.String())
stats.NewStringFunc(statName, func() string { return fmt.Sprint(*adinfo) })
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
for _, plugin := range d.plugins {
wg.Add(1)
if err := plugin.Advertise(ctx, adinfo, wg.Done); err != nil {
cancel()
return nil, err
}
}
stop := func() {
stats.Delete(statName) //nolint:errcheck
cancel()
wg.Wait()
}
return stop, nil
}
func (d *idiscovery) stopAdvertising(id discovery.AdId) {
d.adMu.Lock()
subtask := d.adSubtasks[id]
delete(d.adSubtasks, id)
d.adMu.Unlock()
if subtask == nil {
return
}
subtask.mu.Lock()
if subtask.stop != nil {
subtask.stop()
subtask.stop = nil
}
subtask.mu.Unlock()
}
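// updateAdvertising restarts the plugin advertisers with the refreshed ad info,
// e.g. when the directory server's endpoints change while an ad is active.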
func (d *idiscovery) updateAdvertising(adinfo *AdInfo) {
d.adMu.Lock()
subtask := d.adSubtasks[adinfo.Ad.Id]
if subtask == nil {
d.adMu.Unlock()
return
}
d.adMu.Unlock()
subtask.mu.Lock()
defer subtask.mu.Unlock()
if subtask.stop == nil {
return
}
subtask.stop()
ctx := subtask.parent
var err error
subtask.stop, err = d.startAdvertising(ctx, adinfo)
if err != nil {
ctx.Error(err)
d.cancelTask(ctx)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
werkzeug/serving.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get a
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import thread
import signal
import subprocess
from urllib import unquote
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug.urls import _safe_urlsplit
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = _safe_urlsplit(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown':
shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': unquote(request_url.path),
'QUERY_STRING': request_url.query,
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
app = self.server.app
environ = self.make_environ()
headers_set = []
headers_sent = []
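        # Standard WSGI bookkeeping: start_response() records the status and
        # headers in headers_set; the first write() copies them into
        # headers_sent and emits them, so headers go out exactly once.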
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
code, msg = status.split(None, 1)
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is str, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
# make sure the headers are sent
if not headers_sent:
write('')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(app)
except (socket.error, socket.timeout), e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout), e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
from OpenSSL import crypto
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'w') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'w') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
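# A minimal usage sketch (paths are hypothetical): generate dev.crt/dev.key once
# and reuse them across restarts instead of the per-start 'adhoc' context:
#
#     cert_file, pkey_file = make_ssl_devcert('./dev', host='localhost')
#     run_simple('localhost', 4443, app, ssl_context=(cert_file, pkey_file))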
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from OpenSSL import SSL
cert, pkey = generate_adhoc_ssl_pair()
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def load_ssl_context(cert_file, pkey_file):
"""Loads an SSL context from a certificate and private key file."""
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(cert_file)
ctx.use_privatekey_file(pkey_file)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
def shutdown(self, arg=None):
try:
self._con.shutdown()
except Exception:
pass
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
##try:
## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
## socket.SOCK_STREAM, 0,
## socket.AI_PASSIVE)
## if info:
## return info[0][0]
##except socket.gaierror:
## pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def _iter_module_files():
for module in sys.modules.values():
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
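# Reloader convention: the watching loop exits the reloader child with status
# code 3 when a change is detected; restart_with_reloader() below respawns the
# interpreter until it sees any other exit code.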
def _reloader_stat_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
from itertools import chain
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
# Mutated by inotify loop when changes occur.
changed = [False]
# Setup inotify watches
from pyinotify import WatchManager, Notifier
# this API changed at one point, support both
try:
from pyinotify import EventsCodes as ec
ec.IN_ATTRIB
except (ImportError, AttributeError):
import pyinotify as ec
wm = WatchManager()
mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
def signal_changed(event):
if changed[0]:
return
_log('info', ' * Detected change in %r, reloading' % event.path)
changed[:] = [True]
for fname in extra_files or ():
wm.add_watch(fname, mask, signal_changed)
# ... And now we wait...
notif = Notifier(wm)
try:
while not changed[0]:
# always reiterate through sys.modules, adding them
for fname in _iter_module_files():
wm.add_watch(fname, mask, signal_changed)
notif.process_events()
if notif.check_events(timeout=interval):
notif.read_events()
# TODO Set timeout to something small and check parent liveliness
finally:
notif.stop()
sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly. Also
# it's quite buggy and the API is a mess.
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt':
for key, value in new_environ.iteritems():
if isinstance(value, unicode):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an OpenSSL
context, a tuple in the form ``(cert_file, pkey_file)``,
the string ``'adhoc'`` if the server should
automatically create one, or `None` to disable SSL
(which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
_log('info', ' * Running on %s://%s:%d/', ssl_context is None
and 'http' or 'https', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        print 'No application supplied, or too many arguments. See --help'
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"WERKZEUG_RUN_MAIN"
] |
[]
|
["WERKZEUG_RUN_MAIN"]
|
python
| 1 | 0 | |
cmd/bench.go
|
/*
* JuiceFS, Copyright 2021 Juicedata, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/juicedata/juicefs/pkg/utils"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
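// resultRange holds the coloring thresholds used by colorize() for each item:
// {value yellow floor, value green floor, cost green ceiling, cost yellow ceiling}.
// A value above index 1 prints green, above index 0 yellow, otherwise red; a
// cost below index 2 prints green, below index 3 yellow, otherwise red.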
var resultRange = map[string][4]float64{
"bigwr": {100, 200, 10, 50},
"bigrd": {100, 200, 10, 50},
"smallwr": {12.5, 20, 50, 80},
"smallrd": {50, 100, 10, 20},
"stat": {20, 1000, 1, 5},
"fuse": {0, 0, 0.5, 2},
"meta": {0, 0, 2, 5},
"put": {0, 0, 100, 200},
"get": {0, 0, 100, 200},
"delete": {0, 0, 30, 100},
"cachewr": {0, 0, 10, 20},
"cacherd": {0, 0, 1, 5},
}
type benchCase struct {
bm *benchmark
name string
fsize, bsize int // file/block size in Bytes
fcount, bcount int // file/block count
wbar, rbar, sbar *utils.Bar // progress bar for write/read/stat
}
type benchmark struct {
tty bool
big, small *benchCase
threads int
tmpdir string
}
func (bc *benchCase) writeFiles(index int) {
for i := 0; i < bc.fcount; i++ {
fname := fmt.Sprintf("%s/%s.%d.%d", bc.bm.tmpdir, bc.name, index, i)
fp, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
logger.Fatalf("Failed to open file %s: %s", fname, err)
}
buf := make([]byte, bc.bsize)
_, _ = rand.Read(buf)
for j := 0; j < bc.bcount; j++ {
if _, err = fp.Write(buf); err != nil {
logger.Fatalf("Failed to write file %s: %s", fname, err)
}
bc.wbar.Increment()
}
_ = fp.Close()
}
}
func (bc *benchCase) readFiles(index int) {
for i := 0; i < bc.fcount; i++ {
fname := fmt.Sprintf("%s/%s.%d.%d", bc.bm.tmpdir, bc.name, index, i)
fp, err := os.Open(fname)
if err != nil {
logger.Fatalf("Failed to open file %s: %s", fname, err)
}
buf := make([]byte, bc.bsize)
for j := 0; j < bc.bcount; j++ {
if n, err := fp.Read(buf); err != nil || n != bc.bsize {
logger.Fatalf("Failed to read file %s: %d %s", fname, n, err)
}
bc.rbar.Increment()
}
_ = fp.Close()
}
}
func (bc *benchCase) statFiles(index int) {
for i := 0; i < bc.fcount; i++ {
fname := fmt.Sprintf("%s/%s.%d.%d", bc.bm.tmpdir, bc.name, index, i)
if _, err := os.Stat(fname); err != nil {
logger.Fatalf("Failed to stat file %s: %s", fname, err)
}
bc.sbar.Increment()
}
}
func (bc *benchCase) run(test string) float64 {
var fn func(int)
switch test {
case "write":
fn = bc.writeFiles
case "read":
fn = bc.readFiles
case "stat":
fn = bc.statFiles
	} // no default case: an unknown test name leaves fn nil and panics below
var wg sync.WaitGroup
start := time.Now()
for i := 0; i < bc.bm.threads; i++ {
index := i
wg.Add(1)
go func() {
fn(index)
wg.Done()
}()
}
wg.Wait()
return time.Since(start).Seconds()
}
// blockSize, bigSize in MiB; smallSize in KiB
func newBenchmark(tmpdir string, blockSize, bigSize, smallSize, smallCount, threads int) *benchmark {
bm := &benchmark{threads: threads, tmpdir: tmpdir}
if bigSize > 0 {
bm.big = bm.newCase("bigfile", bigSize<<20, 1, blockSize<<20)
}
if smallSize > 0 && smallCount > 0 {
bm.small = bm.newCase("smallfile", smallSize<<10, smallCount, blockSize<<20)
}
return bm
}
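// newCase clamps the block size to the file size and rounds the file size up
// to a whole number of blocks, so the effective file size may be slightly
// larger than requested.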
func (bm *benchmark) newCase(name string, fsize, fcount, bsize int) *benchCase {
bc := &benchCase{
bm: bm,
name: name,
fsize: fsize,
fcount: fcount,
bsize: bsize,
}
if fsize <= bsize {
bc.bcount = 1
bc.bsize = fsize
} else {
bc.bcount = (fsize-1)/bsize + 1
bc.fsize = bc.bcount * bsize
}
return bc
}
func (bm *benchmark) colorize(item string, value, cost float64, prec int) (string, string) {
svalue := strconv.FormatFloat(value, 'f', prec, 64)
scost := strconv.FormatFloat(cost, 'f', 2, 64)
if bm.tty {
r, ok := resultRange[item]
if !ok {
logger.Fatalf("Invalid item: %s", item)
}
if item == "smallwr" || item == "smallrd" || item == "stat" {
r[0] *= float64(bm.threads)
r[1] *= float64(bm.threads)
}
var color int
if value > r[1] { // max
color = GREEN
} else if value > r[0] { // min
color = YELLOW
} else {
color = RED
}
svalue = fmt.Sprintf("%s%dm%s%s", COLOR_SEQ, color, svalue, RESET_SEQ)
if cost < r[2] { // min
color = GREEN
} else if cost < r[3] { // max
color = YELLOW
} else {
color = RED
}
scost = fmt.Sprintf("%s%dm%s%s", COLOR_SEQ, color, scost, RESET_SEQ)
}
return svalue, scost
}
func (bm *benchmark) printResult(result [][3]string) {
var rawmax, max [3]int
for _, l := range result {
for i := 0; i < 3; i++ {
if len(l[i]) > rawmax[i] {
rawmax[i] = len(l[i])
}
}
}
max = rawmax
if bm.tty {
max[1] -= 11 // no color chars
max[2] -= 11
}
var b strings.Builder
for i := 0; i < 3; i++ {
b.WriteByte('+')
b.WriteString(strings.Repeat("-", max[i]+2))
}
b.WriteByte('+')
divider := b.String()
fmt.Println(divider)
b.Reset()
header := []string{"ITEM", "VALUE", "COST"}
for i := 0; i < 3; i++ {
b.WriteString(" | ")
b.WriteString(padding(header[i], max[i], ' '))
}
b.WriteString(" |")
fmt.Println(b.String()[1:])
fmt.Println(divider)
for _, l := range result {
b.Reset()
for i := 0; i < 3; i++ {
b.WriteString(" | ")
if spaces := rawmax[i] - len(l[i]); spaces > 0 {
b.WriteString(strings.Repeat(" ", spaces))
}
b.WriteString(l[i])
}
b.WriteString(" |")
fmt.Println(b.String()[1:])
}
fmt.Println(divider)
}
func bench(ctx *cli.Context) error {
setLoggerLevel(ctx)
/* --- Pre-check --- */
if ctx.Uint("block-size") == 0 || ctx.Uint("threads") == 0 {
return os.ErrInvalid
}
if ctx.NArg() < 1 {
logger.Fatalln("PATH must be provided")
}
tmpdir, err := filepath.Abs(ctx.Args().First())
if err != nil {
logger.Fatalf("Failed to get absolute path of %s: %s", ctx.Args().First(), err)
}
tmpdir = filepath.Join(tmpdir, fmt.Sprintf("__juicefs_benchmark_%d__", time.Now().UnixNano()))
bm := newBenchmark(tmpdir, int(ctx.Uint("block-size")), int(ctx.Uint("big-file-size")),
int(ctx.Uint("small-file-size")), int(ctx.Uint("small-file-count")), int(ctx.Uint("threads")))
if bm.big == nil && bm.small == nil {
return os.ErrInvalid
}
var purgeArgs []string
if os.Getuid() != 0 {
purgeArgs = append(purgeArgs, "sudo")
}
switch runtime.GOOS {
case "darwin":
purgeArgs = append(purgeArgs, "purge")
case "linux":
purgeArgs = append(purgeArgs, "/bin/sh", "-c", "echo 3 > /proc/sys/vm/drop_caches")
default:
logger.Fatal("Currently only support Linux/macOS")
}
/* --- Prepare --- */
if _, err := os.Stat(bm.tmpdir); os.IsNotExist(err) {
if err = os.MkdirAll(bm.tmpdir, 0755); err != nil {
logger.Fatalf("Failed to create %s: %s", bm.tmpdir, err)
}
}
var statsPath string
for mp := filepath.Dir(bm.tmpdir); mp != "/"; mp = filepath.Dir(mp) {
if _, err := os.Stat(filepath.Join(mp, ".stats")); err == nil {
statsPath = filepath.Join(mp, ".stats")
break
}
}
dropCaches := func() {
if os.Getenv("SKIP_DROP_CACHES") != "true" {
if err := exec.Command(purgeArgs[0], purgeArgs[1:]...).Run(); err != nil {
logger.Warnf("Failed to clean kernel caches: %s", err)
}
} else {
logger.Warnf("Clear cache operation has been skipped")
}
}
if os.Getuid() != 0 {
fmt.Println("Cleaning kernel cache, may ask for root privilege...")
}
dropCaches()
bm.tty = isatty.IsTerminal(os.Stdout.Fd())
progress := utils.NewProgress(!bm.tty, false)
if b := bm.big; b != nil {
total := int64(bm.threads * b.fcount * b.bcount)
b.wbar = progress.AddCountBar("Write big", total)
b.rbar = progress.AddCountBar("Read big", total)
}
if s := bm.small; s != nil {
total := int64(bm.threads * s.fcount * s.bcount)
s.wbar = progress.AddCountBar("Write small", total)
s.rbar = progress.AddCountBar("Read small", total)
s.sbar = progress.AddCountBar("Stat file", int64(bm.threads*s.fcount))
}
/* --- Run Benchmark --- */
var stats map[string]float64
if statsPath != "" {
stats = readStats(statsPath)
}
var result [][3]string
if b := bm.big; b != nil {
cost := b.run("write")
line := [3]string{"Write big file"}
line[1], line[2] = bm.colorize("bigwr", float64((b.fsize>>20)*b.fcount*bm.threads)/cost, cost/float64(b.fcount), 2)
line[1] += " MiB/s"
line[2] += " s/file"
result = append(result, line)
dropCaches()
cost = b.run("read")
line[0] = "Read big file"
line[1], line[2] = bm.colorize("bigrd", float64((b.fsize>>20)*b.fcount*bm.threads)/cost, cost/float64(b.fcount), 2)
line[1] += " MiB/s"
line[2] += " s/file"
result = append(result, line)
}
if s := bm.small; s != nil {
cost := s.run("write")
line := [3]string{"Write small file"}
line[1], line[2] = bm.colorize("smallwr", float64(s.fcount*bm.threads)/cost, cost*1000/float64(s.fcount), 1)
line[1] += " files/s"
line[2] += " ms/file"
result = append(result, line)
dropCaches()
cost = s.run("read")
line[0] = "Read small file"
line[1], line[2] = bm.colorize("smallrd", float64(s.fcount*bm.threads)/cost, cost*1000/float64(s.fcount), 1)
line[1] += " files/s"
line[2] += " ms/file"
result = append(result, line)
dropCaches()
cost = s.run("stat")
line[0] = "Stat file"
line[1], line[2] = bm.colorize("stat", float64(s.fcount*bm.threads)/cost, cost*1000/float64(s.fcount), 1)
line[1] += " files/s"
line[2] += " ms/file"
result = append(result, line)
}
progress.Done()
/* --- Clean-up --- */
if err := exec.Command("rm", "-rf", bm.tmpdir).Run(); err != nil {
logger.Warnf("Failed to cleanup %s: %s", bm.tmpdir, err)
}
/* --- Report --- */
fmt.Println("Benchmark finished!")
fmt.Printf("BlockSize: %d MiB, BigFileSize: %d MiB, SmallFileSize: %d KiB, SmallFileCount: %d, NumThreads: %d\n",
ctx.Uint("block-size"), ctx.Uint("big-file-size"), ctx.Uint("small-file-size"), ctx.Uint("small-file-count"), ctx.Uint("threads"))
if stats != nil {
stats2 := readStats(statsPath)
diff := func(item string) float64 {
return stats2["juicefs_"+item] - stats["juicefs_"+item]
}
show := func(title, nick, item string) {
count := diff(item + "_total")
var cost float64
if count > 0 {
cost = diff(item+"_sum") * 1000 / count
}
line := [3]string{title}
line[1], line[2] = bm.colorize(nick, count, cost, 0)
line[1] += " operations"
line[2] += " ms/op"
result = append(result, line)
}
show("FUSE operation", "fuse", "fuse_ops_durations_histogram_seconds")
show("Update meta", "meta", "transaction_durations_histogram_seconds")
show("Put object", "put", "object_request_durations_histogram_seconds_PUT")
show("Get object", "get", "object_request_durations_histogram_seconds_GET")
show("Delete object", "delete", "object_request_durations_histogram_seconds_DELETE")
show("Write into cache", "cachewr", "blockcache_write_hist_seconds")
show("Read from cache", "cacherd", "blockcache_read_hist_seconds")
var fmtString string
if bm.tty {
greenSeq := fmt.Sprintf("%s%dm", COLOR_SEQ, GREEN)
fmtString = fmt.Sprintf("Time used: %s%%.1f%s s, CPU: %s%%.1f%s%%%%, Memory: %s%%.1f%s MiB\n",
greenSeq, RESET_SEQ, greenSeq, RESET_SEQ, greenSeq, RESET_SEQ)
} else {
fmtString = "Time used: %.1f s, CPU: %.1f%%, Memory: %.1f MiB\n"
}
fmt.Printf(fmtString, diff("uptime"), diff("cpu_usage")*100/diff("uptime"), stats2["juicefs_memory"]/1024/1024)
}
bm.printResult(result)
return nil
}
func benchFlags() *cli.Command {
return &cli.Command{
Name: "bench",
Usage: "run benchmark to read/write/stat big/small files",
Action: bench,
ArgsUsage: "PATH",
Flags: []cli.Flag{
&cli.UintFlag{
Name: "block-size",
Value: 1,
Usage: "block size in MiB",
},
&cli.UintFlag{
Name: "big-file-size",
Value: 1024,
Usage: "size of big file in MiB",
},
&cli.UintFlag{
Name: "small-file-size",
Value: 128,
Usage: "size of small file in KiB",
},
&cli.UintFlag{
Name: "small-file-count",
Value: 100,
Usage: "number of small files",
},
&cli.UintFlag{
Name: "threads",
Aliases: []string{"p"},
Value: 1,
Usage: "number of concurrent threads",
},
},
}
}
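// Example invocation (a sketch; the mount point is hypothetical, the flag
// names are the ones registered in benchFlags above):
//
//	juicefs bench /mnt/jfs --block-size 1 --big-file-size 1024 \
//	    --small-file-size 128 --small-file-count 100 -p 4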
|
[
"\"SKIP_DROP_CACHES\""
] |
[] |
[
"SKIP_DROP_CACHES"
] |
[]
|
["SKIP_DROP_CACHES"]
|
go
| 1 | 0 | |
nuclio/query/query-example_test.go
|
package query
import (
"fmt"
"os"
"testing"
"time"
"github.com/nuclio/nuclio-test-go"
)
func TestQuery(t *testing.T) {
data := nutest.DataBind{Name: "db0", Url: os.Getenv("V3IO_URL"), Container: "1"}
tc, err := nutest.NewTestContext(Handler, false, &data)
if err != nil {
t.Fatal(err)
}
err = tc.InitContext(InitContext)
if err != nil {
t.Fatal(err)
}
testEvent := nutest.TestEvent{
Body: []byte(queryEvent),
}
resp, err := tc.Invoke(&testEvent)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
resp, err = tc.Invoke(&testEvent)
time.Sleep(time.Second * 1)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
fmt.Println(resp)
time.Sleep(time.Second * 10)
}
|
[
"\"V3IO_URL\""
] |
[] |
[
"V3IO_URL"
] |
[]
|
["V3IO_URL"]
|
go
| 1 | 0 | |
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a web server for collecting stats.
"""
__author__ = "Andreas Ehrlund"
__version__ = "0.2.0"
__license__ = "MIT"
import os
from datetime import datetime, timedelta
import requests
from flask import Flask
from flask import request
from flask import jsonify
from pymongo import MongoClient
app = Flask(__name__)
mdb_client = MongoClient(os.environ.get("MONGODB_URI", None), retryWrites=False)
db_name = os.environ.get("MONGODB_URI", None).rsplit("/", 1)[-1]
db = mdb_client[db_name]
def geolocate_ip(ip_addr):
api_key = os.environ.get("IPSTACK_API_KEY", None)
try:
r = requests.get(f"http://api.ipstack.com/{ip_addr}?access_key={api_key}")
return r.json()
except Exception as e:
print(e.args)
return None
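# Response sketch (an assumption inferred from the fields consumed below, not
# from ipstack documentation): the JSON is expected to carry at least
# {"latitude": 59.33, "longitude": 18.06, "country_code": "SE"}; only these
# three keys are read by pymkm().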
@app.route("/pymkm", methods=["GET", "POST"])
def pymkm():
if request.method == "POST":
json_data = request.get_json()
geolocation_data = geolocate_ip(
request.environ.get("HTTP_X_FORWARDED_FOR", request.remote_addr)
)
if "version" in json_data and "command" in json_data:
data = {
"date": datetime.utcnow(),
"version": json_data["version"],
"uuid": json_data["uuid"],
"command": json_data["command"],
"ip": request.remote_addr,
}
try:
geo_data = {
"lat": geolocation_data["latitude"],
"long": geolocation_data["longitude"],
"country_code": geolocation_data["country_code"],
}
data.update(geo_data)
except Exception:
pass  # geolocation is best-effort; store the report without location data
# store data row
try:
collection = db.reports
collection.insert_one(data)
except Exception as err:
resp = jsonify(success=False)
print(err)
else:
resp = jsonify(success=True)
else:
resp = jsonify(success=False)
return resp
elif request.method == "GET":
delta = timedelta(days=365)
date_stop = datetime.now() - delta
try:
collection = db.reports
# print(f"count: {collection.count_documents({})}")
result = collection.find({"date": {"$gt": date_stop}}, {"_id": False}).sort(
"date"
)
res = list(result)
return jsonify(res)
except Exception as err:
resp = jsonify(success=False)
print(err)
return resp
|
[] |
[] |
[
"IPSTACK_API_KEY",
"MONGODB_URI"
] |
[]
|
["IPSTACK_API_KEY", "MONGODB_URI"]
|
python
| 2 | 0 | |
cmd/publish/pubsub/main.go
|
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/nats-io/nats"
)
const msgSubject = "natssample.pubsub"
func main() {
natsURL := nats.DefaultURL
if h := os.Getenv("NATS_HOST"); len(h) > 0 {
natsURL = "nats://" + h
}
nc, err := nats.Connect(natsURL)
if err != nil {
log.WithFields(log.Fields{"server-msg": err}).Fatalf("Failed to connect to NATS at %s", natsURL)
}
defer nc.Close()
msg := "Hello World"
if err := nc.Publish(msgSubject, []byte(msg)); err != nil {
log.WithFields(log.Fields{"server-msg": err}).Errorf("Unable to publish message %q\n", msg)
} else {
log.Infof("Publishing message %q\n", msg)
}
}
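// exampleSubscribe is an illustrative counterpart to main above (not part of
// the original sample): a matching subscriber on the same subject, using the
// core NATS API already imported by this file.
func exampleSubscribe(nc *nats.Conn) (*nats.Subscription, error) {
return nc.Subscribe(msgSubject, func(m *nats.Msg) {
log.Infof("Received message %q\n", string(m.Data))
})
}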
|
[
"\"NATS_HOST\""
] |
[] |
[
"NATS_HOST"
] |
[]
|
["NATS_HOST"]
|
go
| 1 | 0 | |
CUT/util/visualizer.py
|
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
# try:
#     from func_timeout import func_timeout, FunctionTimedOut
# except ImportError:
#     print("module func_timeout was not installed. Please install func_timeout using pip install func-timeout.")
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
#image_name = '%s_%s.png' % (name, label)
image_name = '%s/%s.png' % (label, name)
os.makedirs(os.path.join(image_dir, label), exist_ok=True)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
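# Usage sketch (hypothetical names; `webpage` is an html.HTML instance and the
# tensors come from a model's current visuals):
#
#   from collections import OrderedDict
#   visuals = OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
#   save_images(webpage, visuals, ['datasets/horse2zebra/n02381460_1001.jpg'])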
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
if opt.display_id is None:
self.display_id = np.random.randint(100000) * 10 # just a random display id
else:
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.plot_data = {}
self.ncols = opt.display_ncols
if "tensorboard_base_url" not in os.environ:
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
else:
self.vis = visdom.Visdom(port=2004,
base_url=os.environ['tensorboard_base_url'] + '/visdom')
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - if save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, ncols, 2, self.display_id + 1,
None, dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(
image_numpy.transpose([2, 0, 1]),
self.display_id + idx,
None,
dict(title=label)
)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if len(losses) == 0:
return
plot_name = '_'.join(list(losses.keys()))
if plot_name not in self.plot_data:
self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())}
plot_data = self.plot_data[plot_name]
plot_id = list(self.plot_data.keys()).index(plot_name)
plot_data['X'].append(epoch + counter_ratio)
plot_data['Y'].append([losses[k] for k in plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),
Y=np.array(plot_data['Y']),
opts={
'title': self.name,
'legend': plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id - plot_id)
except VisdomExceptionBase:
self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
|
[] |
[] |
[
"tensorboard_base_url"
] |
[]
|
["tensorboard_base_url"]
|
python
| 1 | 0 | |
djangoevents/tests/settings/settings_test_local.py
|
"""
Same as settings_test but instead of migration
uses provided sql. This is for test to make sure
that Event model works fine with db scheme
all teams agreed on.
"""
from .settings_test import *
import os
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': os.environ.get("MYSQL_HOSTNAME", '127.0.0.1'),
'PORT': os.environ.get("MYSQL_PORT", 3306),
'NAME': os.environ.get("MYSQL_DATABASE", 'djangoevents'),
'USER': os.environ.get("MYSQL_USER", 'root'),
}
}
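# Usage sketch (hypothetical values and test runner): point the tests at a
# local MySQL by exporting the variables read above, e.g.
#   MYSQL_HOSTNAME=127.0.0.1 MYSQL_PORT=3306 MYSQL_DATABASE=djangoevents \
#   MYSQL_USER=root python -m pytest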
|
[] |
[] |
[
"MYSQL_HOSTNAME",
"MYSQL_DATABASE",
"MYSQL_PORT",
"MYSQL_USER"
] |
[]
|
["MYSQL_HOSTNAME", "MYSQL_DATABASE", "MYSQL_PORT", "MYSQL_USER"]
|
python
| 4 | 0 | |
Arrays/Left_Rotation.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
len_a = len(a)
d = d % len_a  # normalize so rotations of len(a) or more wrap around
a_left = a[d:]
a_right = a[:d]
new_a = a_left + a_right
return new_a
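# Example (illustrative): rotLeft([1, 2, 3, 4, 5], 2) returns [3, 4, 5, 1, 2],
# and rotLeft([1, 2, 3, 4, 5], 5) returns the array unchanged.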
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
version/version.go
|
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package version
import (
"os"
"github.com/heroku/docker-registry-client/registry"
log "github.com/sirupsen/logrus"
)
// Version is the string that contains version
var Version string
// HelperVersionOverride decouples the k3d helper image versions from the main version, if needed
var HelperVersionOverride string
// K3sVersion should contain the latest version tag of k3s (hardcoded at build time)
// we're setting a default version for edge cases, because the 'latest' tag is not actively maintained
var K3sVersion = "v1.18.4+k3s1" // TODO: can we try to dynamically fetch the latest version at runtime and only fallback to this if it fails?
// GetVersion returns the version for cli, it gets it from "git describe --tags" or returns "dev" when doing simple go build
func GetVersion() string {
if len(Version) == 0 {
return "v3-dev"
}
return Version
}
// GetHelperImageVersion returns the CLI version or 'latest'
func GetHelperImageVersion() string {
if tag := os.Getenv("K3D_HELPER_IMAGE_TAG"); tag != "" {
log.Infoln("Helper image tag set from env var")
return tag
}
if len(HelperVersionOverride) > 0 {
return HelperVersionOverride
}
if len(Version) == 0 {
return "latest"
}
return Version
}
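// Usage sketch (the tag value and subcommand are hypothetical): override the
// helper image tag at runtime via the environment variable read above, e.g.
//
//	K3D_HELPER_IMAGE_TAG=v3.0.0-rc.1 k3d cluster create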
// GetK3sVersion returns the version string for K3s
func GetK3sVersion(latest bool) string {
if latest {
version, err := fetchLatestK3sVersion()
if err != nil || version == "" {
log.Warnln("Failed to fetch latest K3s version from DockerHub, falling back to hardcoded version.")
return K3sVersion
}
return version
}
return K3sVersion
}
// fetchLatestK3sVersion tries to fetch the latest version of k3s from DockerHub
func fetchLatestK3sVersion() (string, error) {
url := "https://registry-1.docker.io/"
username := "" // anonymous
password := "" // anonymous
repository := "rancher/k3s"
hub, err := registry.New(url, username, password)
if err != nil {
return "", err
}
tags, err := hub.Tags(repository)
if err != nil || len(tags) == 0 {
return "", err
}
log.Debugln("Fetched the following tags for rancher/k3s from DockerHub:")
log.Debugln(tags)
return "sampleTag", nil
}
|
[
"\"K3D_HELPER_IMAGE_TAG\""
] |
[] |
[
"K3D_HELPER_IMAGE_TAG"
] |
[]
|
["K3D_HELPER_IMAGE_TAG"]
|
go
| 1 | 0 | |
mongo-driver/mongo/with_transactions_test.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongo
import (
"context"
"errors"
"os"
"path"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/dollarkillerx/mongo/mongo-driver/bson"
"github.com/dollarkillerx/mongo/mongo-driver/internal/testutil"
testhelpers "github.com/dollarkillerx/mongo/mongo-driver/internal/testutil/helpers"
"github.com/dollarkillerx/mongo/mongo-driver/mongo/options"
"github.com/dollarkillerx/mongo/mongo-driver/mongo/readconcern"
"github.com/dollarkillerx/mongo/mongo-driver/mongo/readpref"
"github.com/dollarkillerx/mongo/mongo-driver/x/mongo/driver"
)
const convenientTransactionTestsDir = "../data/convenient-transactions"
type withTransactionArgs struct {
Callback *struct {
Operations []*transOperation `json:"operations"`
} `json:"callback"`
Options map[string]interface{} `json:"options"`
}
// test case for all TransactionSpec tests
func TestConvTransactionSpec(t *testing.T) {
for _, file := range testhelpers.FindJSONFilesInDir(t, convenientTransactionTestsDir) {
runTransactionTestFile(t, path.Join(convenientTransactionTestsDir, file))
}
}
func runWithTransactionOperations(t *testing.T, operations []*transOperation, sess *sessionImpl, collName string, db *Database) error {
for _, op := range operations {
if op.Name == "count" {
t.Skip("count has been deprecated")
}
// Arguments aren't marshaled directly into a map because runcommand
// needs to convert them into BSON docs. We convert them to a map here
// for getting the session and for all other collection operations
op.ArgMap = getArgMap(t, op.Arguments)
// create collection with default read preference Primary (needed to prevent server selection fail)
coll := db.Collection(collName, options.Collection().SetReadPreference(readpref.Primary()).SetReadConcern(readconcern.Local()))
addCollectionOptions(coll, op.CollectionOptions)
// execute the command on given object
var err error
switch op.Object {
case "session0":
err = executeSessionOperation(t, op, sess, collName, db)
case "collection":
err = executeCollectionOperation(t, op, sess, coll)
}
if err != nil {
return err
}
}
return nil
}
func TestConvenientTransactions(t *testing.T) {
cs := testutil.ConnString(t)
opts := options.Client().ApplyURI(cs.String())
if os.Getenv("TOPOLOGY") == "sharded_cluster" {
opts.SetHosts([]string{opts.Hosts[0]})
}
client, err := Connect(context.Background(), opts)
require.NoError(t, err)
defer func() { _ = client.Disconnect(context.Background()) }()
dbName := "TestConvenientTransactions"
db := client.Database(dbName)
dbAdmin := client.Database("admin")
version, err := getServerVersion(dbAdmin)
require.NoError(t, err)
if compareVersions(t, version, "4.1") < 0 || os.Getenv("TOPOLOGY") == "server" {
t.Skip()
}
t.Run("CallbackRaisesCustomError", func(t *testing.T) {
collName := "unpinForNextTransaction"
db.RunCommand(
context.Background(),
bson.D{{"drop", collName}},
)
coll := db.Collection(collName)
_, err = coll.InsertOne(ctx, bson.D{{"x", 1}})
testErr := errors.New("Test Error")
sess, err := client.StartSession()
require.NoError(t, err)
defer sess.EndSession(context.Background())
_, err = sess.WithTransaction(context.Background(), func(sessCtx SessionContext) (interface{}, error) {
return nil, testErr
})
require.Error(t, err)
require.Equal(t, err, testErr)
})
t.Run("CallbackReturnsAValue", func(t *testing.T) {
collName := "CallbackReturnsAValue"
db.RunCommand(
context.Background(),
bson.D{{"drop", collName}},
)
coll := db.Collection(collName)
_, err = coll.InsertOne(ctx, bson.D{{"x", 1}})
sess, err := client.StartSession()
require.NoError(t, err)
defer sess.EndSession(context.Background())
res, err := sess.WithTransaction(context.Background(), func(sessCtx SessionContext) (interface{}, error) {
return false, nil
})
require.NoError(t, err)
resBool, ok := res.(bool)
require.True(t, ok)
require.False(t, resBool)
})
t.Run("RetryTimeoutEnforced", func(t *testing.T) {
withTransactionTimeout = time.Second
collName := "RetryTimeoutEnforced"
db.RunCommand(
context.Background(),
bson.D{{"drop", collName}},
)
coll := db.Collection(collName)
_, err = coll.InsertOne(ctx, bson.D{{"x", 1}})
t.Run("CallbackWithTransientTransactionError", func(t *testing.T) {
sess, err := client.StartSession()
require.NoError(t, err)
defer sess.EndSession(context.Background())
_, err = sess.WithTransaction(context.Background(), func(sessCtx SessionContext) (interface{}, error) {
return nil, CommandError{Name: "test Error", Labels: []string{driver.TransientTransactionError}}
})
require.Error(t, err)
cmdErr, ok := err.(CommandError)
require.True(t, ok)
require.True(t, cmdErr.HasErrorLabel(driver.TransientTransactionError))
})
t.Run("UnknownTransactionCommitResult", func(t *testing.T) {
//set failpoint
failpoint := bson.D{{"configureFailPoint", "failCommand"},
{"mode", "alwaysOn"},
{"data", bson.D{{"failCommands", bson.A{"commitTransaction"}}, {"closeConnection", true}}}}
require.NoError(t, dbAdmin.RunCommand(ctx, failpoint).Err())
defer func() {
require.NoError(t, dbAdmin.RunCommand(ctx, bson.D{
{"configureFailPoint", "failCommand"},
{"mode", "off"},
}).Err())
}()
sess, err := client.StartSession()
require.NoError(t, err)
defer sess.EndSession(context.Background())
_, err = sess.WithTransaction(context.Background(), func(sessCtx SessionContext) (interface{}, error) {
_, err := sessCtx.Client().Database(dbName).Collection(collName).InsertOne(sessCtx, bson.D{{"x", 1}})
return nil, err
})
require.Error(t, err)
cmdErr, ok := err.(CommandError)
require.True(t, ok)
require.True(t, cmdErr.HasErrorLabel(driver.UnknownTransactionCommitResult))
})
t.Run("CommitWithTransientTransactionError", func(t *testing.T) {
//set failpoint
failpoint := bson.D{{"configureFailPoint", "failCommand"},
{"mode", "alwaysOn"},
{"data", bson.D{{"failCommands", bson.A{"commitTransaction"}}, {"errorCode", 251}}}}
err = dbAdmin.RunCommand(ctx, failpoint).Err()
require.NoError(t, err)
defer func() {
require.NoError(t, dbAdmin.RunCommand(ctx, bson.D{
{"configureFailPoint", "failCommand"},
{"mode", "off"},
}).Err())
}()
sess, err := client.StartSession()
require.NoError(t, err)
defer sess.EndSession(context.Background())
_, err = sess.WithTransaction(context.Background(), func(sessCtx SessionContext) (interface{}, error) {
_, err := sessCtx.Client().Database(dbName).Collection(collName).InsertOne(sessCtx, bson.D{{"x", 1}})
return nil, err
})
require.Error(t, err)
cmdErr, ok := err.(CommandError)
require.True(t, ok)
require.True(t, cmdErr.HasErrorLabel(driver.TransientTransactionError))
})
})
}
|
[
"\"TOPOLOGY\"",
"\"TOPOLOGY\""
] |
[] |
[
"TOPOLOGY"
] |
[]
|
["TOPOLOGY"]
|
go
| 1 | 0 | |
WebMonitoring/API/settings.py
|
import os
# DB configs
MYSQL_DATABASE_USER = "root"
MYSQL_DATABASE_PASSWORD = "password"
MYSQL_DATABASE_DB = "WebMonitoring"
if os.environ.get("DOCKER_COMPOSE_BUILD"):
MYSQL_DATABASE_HOST = "mysql"
else:
MYSQL_DATABASE_HOST = "10.96.0.2"
|
[] |
[] |
[
"DOCKER_COMPOSE_BUILD"
] |
[]
|
["DOCKER_COMPOSE_BUILD"]
|
python
| 1 | 0 | |
vertical-pod-autoscaler/e2e/v1/actuation.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"context"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
autoscaling "k8s.io/api/autoscaling/v1"
apiv1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/annotations"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
framework_job "k8s.io/kubernetes/test/e2e/framework/job"
framework_rc "k8s.io/kubernetes/test/e2e/framework/rc"
framework_rs "k8s.io/kubernetes/test/e2e/framework/replicaset"
framework_ss "k8s.io/kubernetes/test/e2e/framework/statefulset"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = ActuationSuiteE2eDescribe("Actuation", func() {
f := framework.NewDefaultFramework("vertical-pod-autoscaling")
ginkgo.It("stops when pods get pending", func() {
ginkgo.By("Setting up a hamster deployment")
d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
ginkgo.By("Setting up a VPA CRD with ridiculous request")
SetupVPA(f, "9999", vpa_types.UpdateModeAuto, hamsterTargetRef) // Request 9999 CPUs to make POD pending
ginkgo.By("Waiting for pods to be restarted and stuck pending")
err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.It("never applies recommendations when update mode is Off", func() {
ginkgo.By("Setting up a hamster deployment")
d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
cpuRequest := getCPURequest(d.Spec.Template.Spec)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up a VPA CRD in mode Off")
SetupVPA(f, "200m", vpa_types.UpdateModeOff, hamsterTargetRef)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Forcefully killing one pod")
killPod(f, podList)
ginkgo.By("Checking the requests were not modified")
updatedPodList, err := GetHamsterPods(f)
for _, pod := range updatedPodList.Items {
gomega.Expect(getCPURequest(pod.Spec)).To(gomega.Equal(cpuRequest))
}
})
ginkgo.It("applies recommendations only on restart when update mode is Initial", func() {
ginkgo.By("Setting up a hamster deployment")
SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up a VPA CRD in mode Initial")
SetupVPA(f, "200m", vpa_types.UpdateModeInitial, hamsterTargetRef)
updatedCPURequest := ParseQuantityOrDie("200m")
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Forcefully killing one pod")
killPod(f, podList)
ginkgo.By("Checking that request was modified after forceful restart")
updatedPodList, err := GetHamsterPods(f)
foundUpdated := 0
for _, pod := range updatedPodList.Items {
podRequest := getCPURequest(pod.Spec)
framework.Logf("podReq: %v", podRequest)
if podRequest.Cmp(updatedCPURequest) == 0 {
foundUpdated += 1
}
}
gomega.Expect(foundUpdated).To(gomega.Equal(1))
})
ginkgo.It("evicts pods in a Deployment", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "hamster-deployment",
})
})
ginkgo.It("evicts pods in a Replication Controller", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "v1",
Kind: "ReplicationController",
Name: "hamster-rc",
})
})
ginkgo.It("evicts pods in a Job", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "batch/v1",
Kind: "Job",
Name: "hamster-job",
})
})
ginkgo.It("evicts pods in a CronJob", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "batch/v1",
Kind: "CronJob",
Name: "hamster-cronjob",
})
})
ginkgo.It("evicts pods in a ReplicaSet", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: "hamster-rs",
})
})
ginkgo.It("evicts pods in a StatefulSet", func() {
testEvictsPods(f, &autoscaling.CrossVersionObjectReference{
APIVersion: "apps/v1",
Kind: "StatefulSet",
Name: "hamster-stateful",
})
})
ginkgo.It("observes pod disruption budget", func() {
ginkgo.By("Setting up a hamster deployment")
c := f.ClientSet
ns := f.Namespace.Name
SetupHamsterDeployment(f, "10m", "10Mi", 10)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
podSet := MakePodSet(podList)
ginkgo.By("Setting up prohibitive PDB for hamster deployment")
pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */)
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "25m", vpa_types.UpdateModeAuto, hamsterTargetRef)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
ginkgo.By("Updating the PDB to allow for multiple pods to be evicted")
// We will check that 7 replicas are evicted in 3 minutes, which translates
// to 3 updater loops. This gives us relatively good confidence that updater
// evicts more than one pod in a loop if PDB allows it.
permissiveMaxUnavailable := 7
// Creating new PDB and removing old one, since PDBs are immutable at the moment
setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable)
err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String()))
time.Sleep(VpaEvictionTimeout)
ginkgo.By("Checking enough pods were evicted.")
currentPodList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
evictedCount := GetEvictedPodsCount(MakePodSet(currentPodList), podSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
})
ginkgo.It("observes container max in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)
// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
// recommendation is 200m
// Max memory limit is 1T and ratio is 2., so max request is 0.5T
InstallLimitRangeWithMax(f, "300m", "1T", apiv1.LimitTypeContainer)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("observes container min in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)
// Min CPU from limit range is 100m and ratio is 3. Min applies both to limit and request so min
// request is 100m request and 300m limit
// Min memory limit is 0 and ratio is 2., so min request is 0
InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("observes pod max in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPAForNHamsters(f, 2, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)
// Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m,
// while recommendation is 200m
// Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T
InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("observes pod min in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
podList := startDeploymentPods(f, d)
ginkgo.By("Setting up a VPA CRD")
SetupVPAForNHamsters(f, 2, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)
// Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies both
// to limit and request so min request is 100m request and 300m limit
// Min memory limit is 0 and ratio is 2., so min request is 0
InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
})
ginkgo.It("does not act on injected sidecars", func() {
const (
// TODO(krzysied): Update the image url when the agnhost:2.10 image
// is promoted to the k8s-e2e-test-images repository.
agnhostImage = "gcr.io/k8s-staging-e2e-test-images/agnhost:2.10"
sidecarParam = "--sidecar-image=k8s.gcr.io/pause:3.1"
sidecarName = "webhook-added-sidecar"
servicePort = int32(8443)
containerPort = int32(8444)
)
ginkgo.By("Setting up Webhook for sidecar injection")
client := f.ClientSet
namespaceName := f.Namespace.Name
defer utils.CleanWebhookTest(client, namespaceName)
// Make sure the namespace created for the test is labeled to be selected by the webhooks.
utils.LabelNamespace(f, f.Namespace.Name)
utils.CreateWebhookConfigurationReadyNamespace(f)
ginkgo.By("Setting up server cert")
context := utils.SetupWebhookCert(namespaceName)
utils.CreateAuthReaderRoleBinding(f, namespaceName)
utils.DeployWebhookAndService(f, agnhostImage, context, servicePort, containerPort, sidecarParam)
// Webhook must be placed after vpa webhook. Webhooks are registered alphabetically.
// Use name that starts with "z".
webhookCleanup := utils.RegisterMutatingWebhookForPod(f, "z-sidecar-injection-webhook", context, servicePort)
defer webhookCleanup()
ginkgo.By("Setting up a hamster vpa")
mode := vpa_types.UpdateModeAuto
hamsterResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("100m")}
sidecarResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("5000m")}
vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: GetHamsterContainerNameByIndex(0),
Target: hamsterResourceList,
LowerBound: hamsterResourceList,
UpperBound: hamsterResourceList,
},
{
ContainerName: sidecarName,
Target: sidecarResourceList,
LowerBound: sidecarResourceList,
UpperBound: sidecarResourceList,
},
},
}
InstallVPA(f, vpaCRD)
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
podList := startDeploymentPods(f, d)
for _, pod := range podList.Items {
observedContainers, ok := pod.GetAnnotations()[annotations.VpaObservedContainersLabel]
gomega.Expect(ok).To(gomega.Equal(true))
containers, err := annotations.ParseVpaObservedContainersValue(observedContainers)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(containers).To(gomega.HaveLen(1))
gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(2))
}
podSet := MakePodSet(podList)
ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, podSet)
})
})
func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity {
return podSpec.Containers[0].Resources.Requests[apiv1.ResourceCPU]
}
func killPod(f *framework.Framework, podList *apiv1.PodList) {
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podList.Items[0].Name, metav1.DeleteOptions{})
err := WaitForPodsRestarted(f, podList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// assertPodsPendingForDuration checks that at most pendingPodsNum pods are pending for pendingDuration
func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error {
pendingPods := make(map[string]time.Time)
err := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) {
var err error
currentPodList, err := framework_deployment.GetPodsForDeployment(c, deployment)
if err != nil {
return false, err
}
missingPods := make(map[string]bool)
for podName := range pendingPods {
missingPods[podName] = true
}
now := time.Now()
for _, pod := range currentPodList.Items {
delete(missingPods, pod.Name)
switch pod.Status.Phase {
case apiv1.PodPending:
_, ok := pendingPods[pod.Name]
if !ok {
pendingPods[pod.Name] = now
}
default:
delete(pendingPods, pod.Name)
}
}
for missingPod := range missingPods {
delete(pendingPods, missingPod)
}
if len(pendingPods) < pendingPodsNum {
return false, nil
}
if len(pendingPods) > pendingPodsNum {
return false, fmt.Errorf("%v pending pods seen - expecting %v", len(pendingPods), pendingPodsNum)
}
for p, t := range pendingPods {
fmt.Println("task", now, p, t, now.Sub(t), pendingDuration)
if now.Sub(t) < pendingDuration {
return false, nil
}
}
return true, nil
})
if err != nil {
return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err)
}
return nil
}
func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) {
ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind))
setupHamsterController(f, controller.Kind, "100m", "100Mi", defaultHamsterReplicas)
podList, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Setting up a VPA CRD")
SetupVPA(f, "200m", vpa_types.UpdateModeAuto, controller)
ginkgo.By("Waiting for pods to be evicted")
err = WaitForPodsEvicted(f, podList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory string, replicas int32) *apiv1.PodList {
switch controllerKind {
case "Deployment":
SetupHamsterDeployment(f, cpu, memory, replicas)
case "ReplicationController":
setupHamsterReplicationController(f, cpu, memory, replicas)
case "Job":
setupHamsterJob(f, cpu, memory, replicas)
case "CronJob":
SetupHamsterCronJob(f, "*/2 * * * *", cpu, memory, replicas)
case "ReplicaSet":
setupHamsterRS(f, cpu, memory, replicas)
case "StatefulSet":
setupHamsterStateful(f, cpu, memory, replicas)
default:
framework.Failf("Unknown controller kind: %v", controllerKind)
return nil
}
pods, err := GetHamsterPods(f)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return pods
}
func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) {
hamsterContainer := SetupHamsterContainer(cpu, memory)
rc := framework_rc.ByNameContainer("hamster-rc", replicas, hamsterLabels, hamsterContainer, nil)
rc.Namespace = f.Namespace.Name
err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitForRCPodsRunning(f, rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error {
return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
podList, err := GetHamsterPods(f)
if err != nil {
framework.Logf("Error listing pods, retrying: %v", err)
return false, nil
}
podsRunning := int32(0)
for _, pod := range podList.Items {
if pod.Status.Phase == apiv1.PodRunning {
podsRunning += 1
}
}
return podsRunning == *rc.Spec.Replicas, nil
})
}
func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) {
job := framework_job.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure,
replicas, replicas, nil, 10)
job.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
for label, value := range hamsterLabels {
job.Spec.Template.Labels[label] = value
}
err := testutils.CreateJobWithRetries(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework_job.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) {
rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, hamsterLabels, "", "")
rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = framework_rs.WaitForReadyReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) {
stateful := framework_ss.NewStatefulSet("hamster-stateful", f.Namespace.Name,
"hamster-service", replicas, nil, nil, hamsterLabels)
stateful.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory)
err := createStatefulSetSetWithRetries(f.ClientSet, f.Namespace.Name, stateful)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework_ss.WaitForRunningAndReady(f.ClientSet, *stateful.Spec.Replicas, stateful)
}
func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1beta1.PodDisruptionBudget {
maxUnavailableIntstr := intstr.FromInt(maxUnavailable)
pdb := &policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: policyv1beta1.PodDisruptionBudgetSpec{
MaxUnavailable: &maxUnavailableIntstr,
Selector: &metav1.LabelSelector{
MatchLabels: hamsterLabels,
},
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(context.TODO(), pdb, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return pdb
}
func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) PodSet {
podList, err := framework_deployment.GetPodsForDeployment(c, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return MakePodSet(podList)
}
func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrs.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}
func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().StatefulSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrs.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return testutils.RetryWithExponentialBackOff(createFunc)
}
// newReplicaSet returns a new ReplicaSet.
func newReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet {
return &appsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: appsv1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Replicas: &replicas,
Template: apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: imageName,
Image: image,
SecurityContext: &apiv1.SecurityContext{},
},
},
},
},
},
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
python/pycarbon/tests/hello_world/dataset_with_unischema/tests/test_generate_dataset.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from pycarbon.tests.hello_world.dataset_with_unischema.generate_pycarbon_dataset import generate_pycarbon_dataset
from pycarbon.tests.hello_world.dataset_with_unischema.pyspark_hello_world_carbon import pyspark_hello_world
from pycarbon.tests.hello_world.dataset_with_unischema.python_hello_world_carbon import python_hello_world
from pycarbon.tests.hello_world.dataset_with_unischema.tensorflow_hello_world_carbon import tensorflow_hello_world
from petastorm.tests.conftest import SyntheticDataset
from pycarbon.core.Constants import LOCAL_FILE_PREFIX
from pycarbon.reader import make_reader
import jnius_config
jnius_config.set_classpath(pytest.config.getoption("--carbon-sdk-path"))
if pytest.config.getoption("--pyspark-python") is not None and \
pytest.config.getoption("--pyspark-driver-python") is not None:
os.environ['PYSPARK_PYTHON'] = pytest.config.getoption("--pyspark-python")
os.environ['PYSPARK_DRIVER_PYTHON'] = pytest.config.getoption("--pyspark-driver-python")
elif 'PYSPARK_PYTHON' in os.environ.keys() and 'PYSPARK_DRIVER_PYTHON' in os.environ.keys():
pass
else:
raise ValueError("please set PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON variables, "
"using cmd line --pyspark-python=PYSPARK_PYTHON_PATH --pyspark-driver-python=PYSPARK_DRIVER_PYTHON_PATH, "
"or set PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON in system env")
@pytest.fixture(scope="session")
def pycarbon_dataset(tmpdir_factory):
path = tmpdir_factory.mktemp("data").strpath
url = LOCAL_FILE_PREFIX + path
generate_pycarbon_dataset(url)
dataset = SyntheticDataset(url=url, path=path, data=None)
# Generate a dataset
assert os.path.exists(os.path.join(path, '_SUCCESS'))
return dataset
def test_generate(pycarbon_dataset):
# Read from it using a plain reader
with make_reader(pycarbon_dataset.url) as reader:
all_samples = list(reader)
assert all_samples
with make_reader(pycarbon_dataset.url, is_batch=False) as reader:
all_samples = list(reader)
assert all_samples
def test_pyspark_hello_world_pycarbon_dataset_example(pycarbon_dataset):
pyspark_hello_world(pycarbon_dataset.url)
def test_python_hello_world_pycarbon_dataset_example(pycarbon_dataset):
python_hello_world(pycarbon_dataset.url)
def test_tensorflow_hello_world_pycarbon_dataset_example(pycarbon_dataset):
tensorflow_hello_world(pycarbon_dataset.url)
|
[] |
[] |
[
"PYSPARK_PYTHON",
"PYSPARK_DRIVER_PYTHON"
] |
[]
|
["PYSPARK_PYTHON", "PYSPARK_DRIVER_PYTHON"]
|
python
| 2 | 0 | |
internal/exec/exec.go
|
// Package exec implements a minimalized external process launcher. It exists to work around some shortcomings for
// Windows scenarios that aren't exposed via the os/exec package.
package exec
import (
"errors"
"fmt"
"os"
"strings"
"syscall"
"unicode/utf16"
"unsafe"
"golang.org/x/sys/windows"
)
var (
errProcNotStarted = errors.New("process has not started yet")
errProcNotFinished = errors.New("process has not finished yet")
)
// Exec is an object that represents an external process. A user should NOT initialize one manually and instead should
// call New() and pass in the relevant options to retrieve one.
//
// The Exec object is not intended to be used across threads and most methods should only be called once per object.
// It's expected to follow one of two conventions for starting and managing the lifetime of the process.
//
// Either: New() -> e.Start() -> e.Wait() -> (Optional) e.ExitCode()
//
// or: New() -> e.Run() -> (Optional) e.ExitCode()
//
// To capture output or send data to the process, the Stdin(), Stdout() and Stderr() methods can be used.
type Exec struct {
path string
cmdline string
// Process filled in after Start() returns successfully.
process *os.Process
// procState will be filled in after Wait() returns.
procState *os.ProcessState
waitCalled bool
// stdioPipesOurSide are the stdio pipes that Exec owns and that we use to send input to and receive output from the process.
// These are what will be returned from calls to Exec.Stdin()/Stdout()/Stderr().
stdioPipesOurSide [3]*os.File
// stdioPipesProcSide are the stdio pipes that will be passed into the process. These should not be interacted with at all
// and aren't exposed in any way to a user of Exec.
stdioPipesProcSide [3]*os.File
attrList *windows.ProcThreadAttributeListContainer
*execConfig
}
// New returns a new instance of an `Exec` object. A process is not running at this point and must be started via either Run(), or a combination
// of Start() + Wait().
func New(path, cmdLine string, opts ...ExecOpts) (*Exec, error) {
// Path is the only required parameter here, as we need something to launch.
if path == "" {
return nil, errors.New("path cannot be empty")
}
// Apply all of the options passed in.
eopts := &execConfig{}
for _, o := range opts {
if err := o(eopts); err != nil {
return nil, err
}
}
e := &Exec{
path: path,
cmdline: cmdLine,
execConfig: eopts,
}
if err := e.setupStdio(); err != nil {
return nil, err
}
return e, nil
}
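// Usage sketch (illustrative; the executable path and command line below are
// placeholders, and error handling is elided):
//
//	e, err := New(`C:\Windows\System32\cmd.exe`, `cmd /c echo hello`)
//	if err != nil {
//		// handle err
//	}
//	if err := e.Run(); err != nil {
//		// handle err
//	}
//	_ = e.ExitCode()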
// Start starts the process with the path and cmdline specified when the Exec object was created. This does not wait for exit or release any resources;
// a call to Wait must be made afterwards.
func (e *Exec) Start() error {
argv0 := e.path
if len(e.dir) != 0 {
// Windows CreateProcess looks for argv0 relative to the current
// directory, and, only once the new process is started, it does
// Chdir(attr.Dir). We are adjusting for that difference here by
// making argv0 absolute.
var err error
argv0, err = joinExeDirAndFName(e.dir, e.path)
if err != nil {
return err
}
}
argv0p, err := windows.UTF16PtrFromString(argv0)
if err != nil {
return err
}
argvp, err := windows.UTF16PtrFromString(e.cmdline)
if err != nil {
return err
}
var dirp *uint16
if len(e.dir) != 0 {
dirp, err = windows.UTF16PtrFromString(e.dir)
if err != nil {
return err
}
}
siEx := new(windows.StartupInfoEx)
siEx.Flags = windows.STARTF_USESTDHANDLES
pi := new(windows.ProcessInformation)
// Need EXTENDED_STARTUPINFO_PRESENT as we're making use of the attribute list field.
flags := uint32(windows.CREATE_UNICODE_ENVIRONMENT) | windows.EXTENDED_STARTUPINFO_PRESENT | e.execConfig.processFlags
// Allocate an attribute list that's large enough to do the operations we care about
// 1. Assigning to a job object at creation time
// 2. Pseudo console setup if one was requested.
// 3. Inherit only stdio handles if ones were requested.
// Therefore we need a list of size 3.
e.attrList, err = windows.NewProcThreadAttributeList(3)
if err != nil {
return fmt.Errorf("failed to initialize process thread attribute list: %w", err)
}
// Need to know whether the process needs to inherit stdio handles. The below setup is so that we only inherit the
// stdio pipes and nothing else into the new process.
inheritHandles := e.stdioPipesProcSide[0] != nil || e.stdioPipesProcSide[1] != nil || e.stdioPipesProcSide[2] != nil
if inheritHandles {
var handles []uintptr
for _, file := range e.stdioPipesProcSide {
if file.Fd() != uintptr(syscall.InvalidHandle) {
handles = append(handles, file.Fd())
}
}
// Set up the process to only inherit stdio handles and nothing else.
err := e.attrList.Update(
windows.PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
unsafe.Pointer(&handles[0]),
uintptr(len(handles))*unsafe.Sizeof(handles[0]),
)
if err != nil {
return err
}
// Assign the handles to the startup info's stdio fields.
if e.stdioPipesProcSide[0] != nil {
siEx.StdInput = windows.Handle(e.stdioPipesProcSide[0].Fd())
}
if e.stdioPipesProcSide[1] != nil {
siEx.StdOutput = windows.Handle(e.stdioPipesProcSide[1].Fd())
}
if e.stdioPipesProcSide[2] != nil {
siEx.StdErr = windows.Handle(e.stdioPipesProcSide[2].Fd())
}
}
if e.job != nil {
if err := e.job.UpdateProcThreadAttribute(e.attrList); err != nil {
return err
}
}
if e.cpty != nil {
if err := e.cpty.UpdateProcThreadAttribute(e.attrList); err != nil {
return err
}
}
var zeroSec windows.SecurityAttributes
pSec := &windows.SecurityAttributes{Length: uint32(unsafe.Sizeof(zeroSec)), InheritHandle: 1}
tSec := &windows.SecurityAttributes{Length: uint32(unsafe.Sizeof(zeroSec)), InheritHandle: 1}
siEx.ProcThreadAttributeList = e.attrList.List()
siEx.Cb = uint32(unsafe.Sizeof(*siEx))
if e.execConfig.token != 0 {
err = windows.CreateProcessAsUser(
e.execConfig.token,
argv0p,
argvp,
pSec,
tSec,
inheritHandles,
flags,
createEnvBlock(addCriticalEnv(dedupEnvCase(true, e.env))),
dirp,
&siEx.StartupInfo,
pi,
)
} else {
err = windows.CreateProcess(
argv0p,
argvp,
pSec,
tSec,
inheritHandles,
flags,
createEnvBlock(addCriticalEnv(dedupEnvCase(true, e.env))),
dirp,
&siEx.StartupInfo,
pi,
)
}
if err != nil {
return fmt.Errorf("failed to create process: %w", err)
}
// Don't need the thread handle for anything.
defer func() {
_ = windows.CloseHandle(windows.Handle(pi.Thread))
}()
// Grab an *os.Process to avoid reinventing the wheel here. The stdlib has great logic around waiting, exit code status/cleanup after a
// process has been launched.
e.process, err = os.FindProcess(int(pi.ProcessId))
if err != nil {
// If we can't find the process via os.FindProcess, terminate the process as that's what we rely on for all further operations on the
// object.
if tErr := windows.TerminateProcess(pi.Process, 1); tErr != nil {
return fmt.Errorf("failed to terminate process after process not found: %w", tErr)
}
return fmt.Errorf("failed to find process after starting: %w", err)
}
return nil
}
// Run runs the process to completion. It is equivalent to calling Start followed by Wait.
func (e *Exec) Run() error {
if err := e.Start(); err != nil {
return err
}
return e.Wait()
}
// close releases resources tied to the process (stdio etc.).
func (e *Exec) close() error {
if e.procState == nil {
return errProcNotFinished
}
e.attrList.Delete()
e.closeStdio()
return nil
}
// Pid returns the pid of the running process. If the process isn't running, this will return -1.
func (e *Exec) Pid() int {
if e.process == nil {
return -1
}
return e.process.Pid
}
// Exited returns if the process has exited.
func (e *Exec) Exited() bool {
if e.procState == nil {
return false
}
return e.procState.Exited()
}
// ExitCode returns the exit code of the process. If the process hasn't exited, this will return -1.
func (e *Exec) ExitCode() int {
if e.procState == nil {
return -1
}
return e.procState.ExitCode()
}
// Wait synchronously waits for the process to complete and will close the stdio pipes afterwards. This should only be called once per Exec
// object.
func (e *Exec) Wait() (err error) {
if e.process == nil {
return errProcNotStarted
}
if e.waitCalled {
return errors.New("exec: Wait was already called")
}
e.waitCalled = true
e.procState, err = e.process.Wait()
if err != nil {
return err
}
return e.close()
}
// Kill will forcefully kill the process.
func (e *Exec) Kill() error {
if e.process == nil {
return errProcNotStarted
}
return e.process.Kill()
}
// Stdin returns the pipe standard input is hooked up to. This will be closed once Wait returns.
func (e *Exec) Stdin() *os.File {
if e.cpty != nil {
return e.cpty.InPipe()
}
return e.stdioPipesOurSide[0]
}
// Stdout returns the pipe standard output is hooked up to. It's expected that the client will continuously drain the pipe if standard output is requested.
// The pipe will be closed once Wait returns.
func (e *Exec) Stdout() *os.File {
if e.cpty != nil {
return e.cpty.OutPipe()
}
return e.stdioPipesOurSide[1]
}
// Stderr returns the pipe standard error is hooked up to. It's expected that the client will continuously drain the pipe if standard error is requested.
// The pipe will be closed once Wait returns.
func (e *Exec) Stderr() *os.File {
if e.cpty != nil {
return e.cpty.OutPipe()
}
return e.stdioPipesOurSide[2]
}
// setupStdio handles setting up stdio for the process.
func (e *Exec) setupStdio() error {
stdioRequested := e.stdin || e.stderr || e.stdout
// If the client requested a pseudo console then there's nothing we need to do pipe wise, as the process inherits the other end of the pty's
// pipes.
if e.cpty != nil && stdioRequested {
return nil
}
// Pipe handles returned by os.Pipe() aren't inheritable (as of Go 1.16), so explicitly mark them as inheritable
// for any stdio handles that were requested.
if e.stdin {
pr, pw, err := os.Pipe()
if err != nil {
return err
}
e.stdioPipesOurSide[0] = pw
if err := windows.SetHandleInformation(
windows.Handle(pr.Fd()),
windows.HANDLE_FLAG_INHERIT,
windows.HANDLE_FLAG_INHERIT,
); err != nil {
return fmt.Errorf("failed to make stdin pipe inheritable: %w", err)
}
e.stdioPipesProcSide[0] = pr
}
if e.stdout {
pr, pw, err := os.Pipe()
if err != nil {
return err
}
e.stdioPipesOurSide[1] = pr
if err := windows.SetHandleInformation(
windows.Handle(pw.Fd()),
windows.HANDLE_FLAG_INHERIT,
windows.HANDLE_FLAG_INHERIT,
); err != nil {
return fmt.Errorf("failed to make stdout pipe inheritable: %w", err)
}
e.stdioPipesProcSide[1] = pw
}
if e.stderr {
pr, pw, err := os.Pipe()
if err != nil {
return err
}
e.stdioPipesOurSide[2] = pr
if err := windows.SetHandleInformation(
windows.Handle(pw.Fd()),
windows.HANDLE_FLAG_INHERIT,
windows.HANDLE_FLAG_INHERIT,
); err != nil {
return fmt.Errorf("failed to make stderr pipe inheritable: %w", err)
}
e.stdioPipesProcSide[2] = pw
}
return nil
}
func (e *Exec) closeStdio() {
for i, file := range e.stdioPipesOurSide {
if file != nil {
file.Close()
}
e.stdioPipesOurSide[i] = nil
}
for i, file := range e.stdioPipesProcSide {
if file != nil {
file.Close()
}
e.stdioPipesProcSide[i] = nil
}
}
//
// Below are a bunch of helpers for working with Windows' CreateProcess family of functions. These are mostly exact copies of the same utilities
// found in the go stdlib.
//
func isSlash(c uint8) bool {
return c == '\\' || c == '/'
}
func normalizeDir(dir string) (name string, err error) {
ndir, err := syscall.FullPath(dir)
if err != nil {
return "", err
}
if len(ndir) > 2 && isSlash(ndir[0]) && isSlash(ndir[1]) {
// dir cannot have \\server\share\path form
return "", syscall.EINVAL
}
return ndir, nil
}
func volToUpper(ch int) int {
if 'a' <= ch && ch <= 'z' {
ch += 'A' - 'a'
}
return ch
}
func joinExeDirAndFName(dir, p string) (name string, err error) {
if len(p) == 0 {
return "", syscall.EINVAL
}
if len(p) > 2 && isSlash(p[0]) && isSlash(p[1]) {
// \\server\share\path form
return p, nil
}
if len(p) > 1 && p[1] == ':' {
// has drive letter
if len(p) == 2 {
return "", syscall.EINVAL
}
if isSlash(p[2]) {
return p, nil
} else {
d, err := normalizeDir(dir)
if err != nil {
return "", err
}
if volToUpper(int(p[0])) == volToUpper(int(d[0])) {
return syscall.FullPath(d + "\\" + p[2:])
} else {
return syscall.FullPath(p)
}
}
} else {
// no drive letter
d, err := normalizeDir(dir)
if err != nil {
return "", err
}
if isSlash(p[0]) {
return windows.FullPath(d[:2] + p)
} else {
return windows.FullPath(d + "\\" + p)
}
}
}
// createEnvBlock converts an array of environment strings into
// the representation required by CreateProcess: a sequence of NUL
// terminated strings followed by a nil.
// Last bytes are two UCS-2 NULs, or four NUL bytes.
func createEnvBlock(envv []string) *uint16 {
if len(envv) == 0 {
return &utf16.Encode([]rune("\x00\x00"))[0]
}
length := 0
for _, s := range envv {
length += len(s) + 1
}
length += 1
b := make([]byte, length)
i := 0
for _, s := range envv {
l := len(s)
copy(b[i:i+l], []byte(s))
copy(b[i+l:i+l+1], []byte{0})
i = i + l + 1
}
copy(b[i:i+1], []byte{0})
return &utf16.Encode([]rune(string(b)))[0]
}
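// For example, createEnvBlock([]string{"A=1", "B=2"}) produces the UTF-16
// encoding of "A=1\x00B=2\x00\x00": each entry is NUL terminated, and a final
// NUL terminates the whole block.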
// dedupEnvCase is dedupEnv with a case option for testing.
// If caseInsensitive is true, the case of keys is ignored.
func dedupEnvCase(caseInsensitive bool, env []string) []string {
out := make([]string, 0, len(env))
saw := make(map[string]int, len(env)) // key => index into out
for _, kv := range env {
eq := strings.Index(kv, "=")
if eq < 0 {
out = append(out, kv)
continue
}
k := kv[:eq]
if caseInsensitive {
k = strings.ToLower(k)
}
if dupIdx, isDup := saw[k]; isDup {
out[dupIdx] = kv
continue
}
saw[k] = len(out)
out = append(out, kv)
}
return out
}
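// For example, dedupEnvCase(true, []string{"PATH=a", "Path=b"}) returns
// []string{"Path=b"}: the later duplicate wins, but keeps the position of the
// first occurrence.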
// addCriticalEnv adds any critical environment variables that are required
// (or at least almost always required) on the operating system.
// Currently this is only used for Windows.
func addCriticalEnv(env []string) []string {
for _, kv := range env {
eq := strings.Index(kv, "=")
if eq < 0 {
continue
}
k := kv[:eq]
if strings.EqualFold(k, "SYSTEMROOT") {
// We already have it.
return env
}
}
return append(env, "SYSTEMROOT="+os.Getenv("SYSTEMROOT"))
}
|
[
"\"SYSTEMROOT\""
] |
[] |
[
"SYSTEMROOT"
] |
[]
|
["SYSTEMROOT"]
|
go
| 1 | 0 | |
pkg/client/config/config.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
logf "github.com/tsungming/controller-runtime/pkg/runtime/log"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
kubeconfig, masterURL string
log = logf.KBLog.WithName("client").WithName("config")
)
func init() {
// TODO: Fix this to allow double vendoring this library but still register flags on behalf of users
flag.StringVar(&kubeconfig, "kubeconfig", "",
"Paths to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "",
"The address of the Kubernetes API server. Overrides any value in kubeconfig. "+
"Only required if out-of-cluster.")
}
// GetConfig creates a *rest.Config for talking to a Kubernetes apiserver.
// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
// in cluster and use the cluster provided kubeconfig.
//
// Config precedence
//
// * --kubeconfig flag pointing at a file
//
// * KUBECONFIG environment variable pointing at a file
//
// * In-cluster config if running in cluster
//
// * $HOME/.kube/config if exists
func GetConfig() (*rest.Config, error) {
// If a flag is specified with the config location, use that
if len(kubeconfig) > 0 {
return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
}
// If an env variable is specified with the config location, use that
if len(os.Getenv("KUBECONFIG")) > 0 {
return clientcmd.BuildConfigFromFlags(masterURL, os.Getenv("KUBECONFIG"))
}
// If no explicit location, try the in-cluster config
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
}
// If no in-cluster config, try the default location in the user's home directory
if usr, err := user.Current(); err == nil {
if c, err := clientcmd.BuildConfigFromFlags(
"", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil {
return c, nil
}
}
return nil, fmt.Errorf("could not locate a kubeconfig")
}
// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver.
// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running
// in cluster and use the cluster provided kubeconfig.
//
// Will log an error and exit if there is an error creating the rest.Config.
func GetConfigOrDie() *rest.Config {
config, err := GetConfig()
if err != nil {
log.Error(err, "unable to get kubeconfig")
os.Exit(1)
}
return config
}
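// Usage sketch (illustrative; assumes client-go's kubernetes package is imported):
//
//	cfg := GetConfigOrDie()
//	clientset, err := kubernetes.NewForConfig(cfg)
//	if err != nil {
//		// handle err
//	}
//	_ = clientset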
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
paratransit/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "paratransit.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pyomrx/gui/utils.py
|
import traceback
import webbrowser
from pathlib import Path
import wx
from pyomrx.gui.dialogs import ExceptionDialog
def handle_exception(etype, value, trace):
"""
Handler for all unhandled exceptions.
:param `etype`: the exception type (`SyntaxError`, `ZeroDivisionError`, etc...);
:type `etype`: `Exception`
:param string `value`: the exception error message;
:param string `trace`: the traceback header, if any (otherwise, it prints the
standard Python header: ``Traceback (most recent call last)``).
"""
frame = wx.GetApp().GetTopWindow()
tmp = traceback.format_exception(etype, value, trace)
exception = "".join(tmp)
print(exception)
dlg = ExceptionDialog(exception, parent=None, fatal=True, title='Error')
dlg.ShowModal()
dlg.Destroy()
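# Illustrative wiring (assumes a wx.App exists by the time an exception fires):
#   import sys
#   sys.excepthook = handle_exception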
def open_folder(path):
if Path(path).is_file():
path = path.parent
webbrowser.open(str(path))
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
00_BASICS/main.py
|
# Silence TensorFlow's C++ logging before it is imported
import os, sys, json, time
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow import keras
import PIL.Image as Image
import numpy as np
import io
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
from flask import Flask, request, jsonify, make_response
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
model = None
import flask
# Loading the model
def load_model():
global model
model = ResNet50(weights="imagenet")
def prepare_image(image, target):
# if the image is not RGB then convert it to RGB
if image.mode != "RGB":
image = image.convert("RGB")
# resize the image to desired shape
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
return image
@app.route("/predict", methods=["POST"])
def predict():
data = {"success": False}
if request.method == "POST":
if request.files.get("image"):
# read the image in PIL format
image = request.files.get("image").read()
image = Image.open(io.BytesIO(image))
# preprocess the image
image = prepare_image(image, target=(224, 224))
preds = model.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
for (imageID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
data["success"] = True
return jsonify(data)
@app.route('/', methods=["GET", "POST"])
def hello():
return "Hello world"
if __name__ == '__main__':
print("loading the model please await....")
load_model()
app.run(host="localhost", port=3001)
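# Illustrative request once the server is running (the image file is a placeholder):
#   curl -X POST -F "image=@dog.jpg" http://localhost:3001/predict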
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
cmd/minikube/cmd/start_test.go
|
/*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"strings"
"testing"
"github.com/blang/semver/v4"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/proxy"
)
func TestGetKubernetesVersion(t *testing.T) {
var tests = []struct {
description string
expectedVersion string
paramVersion string
cfg *cfg.ClusterConfig
}{
{
description: "kubernetes-version not given, no config",
expectedVersion: constants.DefaultKubernetesVersion,
paramVersion: "",
},
{
description: "kubernetes-version not given, config available",
expectedVersion: "v1.15.0",
paramVersion: "",
cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}},
},
{
description: "kubernetes-version given, no config",
expectedVersion: "v1.15.0",
paramVersion: "v1.15.0",
},
{
description: "kubernetes-version given, config available",
expectedVersion: "v1.16.0",
paramVersion: "v1.16.0",
cfg: &cfg.ClusterConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}},
},
{
description: "kubernetes-version given as 'stable', no config",
expectedVersion: constants.DefaultKubernetesVersion,
paramVersion: "stable",
},
{
description: "kubernetes-version given as 'latest', no config",
expectedVersion: constants.NewestKubernetesVersion,
paramVersion: "latest",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
viper.SetDefault(kubernetesVersion, test.paramVersion)
version := getKubernetesVersion(test.cfg)
// check whether we are getting the expected version
if version != test.expectedVersion {
t.Fatalf("test failed because the expected version %s is not returned", test.expectedVersion)
}
})
}
}
var checkRepoMock = func(v semver.Version, repo string) error {
return nil
}
func TestMirrorCountry(t *testing.T) {
// Set default disk size value in lieu of flag init
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
checkRepository = checkRepoMock
k8sVersion := constants.DefaultKubernetesVersion
var tests = []struct {
description string
k8sVersion string
imageRepository string
mirrorCountry string
cfg *cfg.ClusterConfig
}{
{
description: "repository-none_mirror-none",
imageRepository: "",
mirrorCountry: "",
},
{
description: "repository-none_mirror-cn",
imageRepository: "",
mirrorCountry: "cn",
},
{
description: "repository-auto_mirror-none",
imageRepository: "auto",
mirrorCountry: "",
},
{
description: "repository-auto_mirror-cn",
imageRepository: "auto",
mirrorCountry: "cn",
},
{
description: "repository-registry.test.com_mirror-none",
imageRepository: "registry.test.com",
mirrorCountry: "",
},
{
description: "repository-registry.test.com_mirror-cn",
imageRepository: "registry.test.com",
mirrorCountry: "cn",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
cmd := &cobra.Command{}
viper.SetDefault(imageRepository, test.imageRepository)
viper.SetDefault(imageMirrorCountry, test.mirrorCountry)
viper.SetDefault(kvmNUMACount, 1)
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, driver.Mock)
if err != nil {
t.Fatalf("Got unexpected error %v during config generation", err)
}
// the resulting repository may legitimately still be "", we only check that config generation succeeded
_ = config.KubernetesConfig.ImageRepository
})
}
}
func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
// Set default disk size value in lieu of flag init
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
originalEnv := os.Getenv("HTTP_PROXY")
defer func() {
err := os.Setenv("HTTP_PROXY", originalEnv)
if err != nil {
t.Fatalf("Error reverting env HTTP_PROXY to it's original value. Got err: %s", err)
}
}()
k8sVersion := constants.NewestKubernetesVersion
var tests = []struct {
description string
proxy string
proxyIgnored bool
}{
{
description: "http_proxy=127.0.0.1:3128",
proxy: "127.0.0.1:3128",
proxyIgnored: true,
},
{
description: "http_proxy=localhost:3128",
proxy: "localhost:3128",
proxyIgnored: true,
},
{
description: "http_proxy=http://localhost:3128",
proxy: "http://localhost:3128",
proxyIgnored: true,
},
{
description: "http_proxy=http://127.0.0.1:3128",
proxy: "http://127.0.0.1:3128",
proxyIgnored: true,
},
{
description: "http_proxy=http://1.2.127.0:3128",
proxy: "http://1.2.127.0:3128",
},
{
description: "http_proxy=1.2.3.4:3128",
proxy: "1.2.3.4:3128",
},
{
description: "no http_proxy",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
cmd := &cobra.Command{}
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
}
cfg.DockerEnv = []string{} // clear docker env to avoid pollution
proxy.SetDockerEnv()
config, _, err := generateClusterConfig(cmd, nil, k8sVersion, "none")
if err != nil {
t.Fatalf("Got unexpected error %v during config generation", err)
}
envPrefix := "HTTP_PROXY="
proxyEnv := envPrefix + test.proxy
if test.proxy == "" {
// If test.proxy is not set, ensure HTTP_PROXY is empty
for _, v := range config.DockerEnv {
if strings.HasPrefix(v, envPrefix) && len(v) > len(envPrefix) {
t.Fatalf("HTTP_PROXY should be empty but got %s", v)
}
}
} else {
if test.proxyIgnored {
// an ignored proxy should not appear in the config
for _, v := range config.DockerEnv {
if v == proxyEnv {
t.Fatalf("Value %v not expected in dockerEnv but occurred", test.proxy)
}
}
} else {
// the proxy must appear in the config
found := false
for _, v := range config.DockerEnv {
if v == proxyEnv {
found = true
break
}
}
if !found {
t.Fatalf("Value %s expected in dockerEnv but not occurred", test.proxy)
}
}
}
})
}
}
func TestSuggestMemoryAllocation(t *testing.T) {
var tests = []struct {
description string
sysLimit int
containerLimit int
nodes int
want int
}{
{"128GB sys", 128000, 0, 1, 6000},
{"64GB sys", 64000, 0, 1, 6000},
{"32GB sys", 32768, 0, 1, 6000},
{"16GB sys", 16384, 0, 1, 4000},
{"odd sys", 14567, 0, 1, 3600},
{"4GB sys", 4096, 0, 1, 2200},
{"2GB sys", 2048, 0, 1, 2048},
{"Unable to poll sys", 0, 0, 1, 2200},
{"128GB sys, 16GB container", 128000, 16384, 1, 16336},
{"64GB sys, 16GB container", 64000, 16384, 1, 16000},
{"16GB sys, 4GB container", 16384, 4096, 1, 4000},
{"4GB sys, 3.5GB container", 16384, 3500, 1, 3452},
{"16GB sys, 2GB container", 16384, 2048, 1, 2048},
{"16GB sys, unable to poll container", 16384, 0, 1, 4000},
{"128GB sys 2 nodes", 128000, 0, 2, 6000},
{"8GB sys 3 nodes", 8192, 0, 3, 2200},
{"16GB sys 2 nodes", 16384, 0, 2, 2200},
{"32GB sys 2 nodes", 32768, 0, 2, 4050},
{"odd sys 2 nodes", 14567, 0, 2, 2200},
{"4GB sys 2 nodes", 4096, 0, 2, 2200},
{"2GB sys 3 nodes", 2048, 0, 3, 2048},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
got := suggestMemoryAllocation(test.sysLimit, test.containerLimit, test.nodes)
if got != test.want {
t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
}
})
}
}
func TestBaseImageFlagDriverCombo(t *testing.T) {
tests := []struct {
driver string
canUseBaseImg bool
}{
{driver.Docker, true},
{driver.Podman, true},
{driver.None, false},
{driver.KVM2, false},
{driver.VirtualBox, false},
{driver.HyperKit, false},
{driver.VMware, false},
{driver.VMwareFusion, false},
{driver.HyperV, false},
{driver.Parallels, false},
{"something_invalid", false},
{"", false},
}
for _, test := range tests {
t.Run(test.driver, func(t *testing.T) {
got := isBaseImageApplicable(test.driver)
if got != test.canUseBaseImg {
t.Errorf("isBaseImageApplicable(driver=%v): got %v, expected %v",
test.driver, got, test.canUseBaseImg)
}
})
}
}
func TestValidateImageRepository(t *testing.T) {
var tests = []struct {
imageRepository string
validImageRepository string
}{
{
imageRepository: "auto",
validImageRepository: "auto",
},
{
imageRepository: "http://registry.test.com/google_containers/",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com/google_containers/",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "registry.test.com/google_containers/",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "http://registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com/google_containers",
validImageRepository: "registry.test.com/google_containers",
},
{
imageRepository: "https://registry.test.com:6666/google_containers",
validImageRepository: "registry.test.com:6666/google_containers",
},
}
for _, test := range tests {
t.Run(test.imageRepository, func(t *testing.T) {
validImageRepository := validateImageRepository(test.imageRepository)
if validImageRepository != test.validImageRepository {
t.Errorf("validateImageRepository(imageRepo=%v): got %v, expected %v",
test.imageRepository, validImageRepository, test.validImageRepository)
}
})
}
}
func TestValidateDiskSize(t *testing.T) {
var tests = []struct {
diskSize string
errorMsg string
}{
{
diskSize: "2G",
errorMsg: "",
},
{
diskSize: "test",
errorMsg: "Validation unable to parse disk size test: FromHumanSize: invalid size: 'test'",
},
{
diskSize: "6M",
errorMsg: fmt.Sprintf("Requested disk size 6 is less than minimum of %v", minimumDiskSize),
},
}
for _, test := range tests {
t.Run(test.diskSize, func(t *testing.T) {
got := validateDiskSize(test.diskSize)
gotError := ""
if got != nil {
gotError = got.Error()
}
if gotError != test.errorMsg {
t.Errorf("validateDiskSize(diskSize=%v): got %v, expected %v", test.diskSize, got, test.errorMsg)
}
})
}
}
func TestValidateRuntime(t *testing.T) {
var tests = []struct {
runtime string
errorMsg string
}{
{
runtime: "cri-o",
errorMsg: "",
},
{
runtime: "docker",
errorMsg: "",
},
{
runtime: "test",
errorMsg: fmt.Sprintf("Invalid Container Runtime: test. Valid runtimes are: %v", cruntime.ValidRuntimes()),
},
}
for _, test := range tests {
t.Run(test.runtime, func(t *testing.T) {
got := validateRuntime(test.runtime)
gotError := ""
if got != nil {
gotError = got.Error()
}
if gotError != test.errorMsg {
t.Errorf("ValidateRuntime(runtime=%v): got %v, expected %v", test.runtime, got, test.errorMsg)
}
})
}
}
func TestValidatePorts(t *testing.T) {
type portTest struct {
ports []string
errorMsg string
}
var tests = []portTest{
{
ports: []string{"test:80"},
errorMsg: "Sorry, one of the ports provided with --ports flag is not valid [test:80]",
},
{
ports: []string{"0:80"},
errorMsg: "Sorry, one of the ports provided with --ports flag is outside range [0:80]",
},
{
ports: []string{"8080:80", "6443:443"},
errorMsg: "",
},
}
if detect.IsMicrosoftWSL() {
tests = append(tests, portTest{
ports: []string{"80:80"},
errorMsg: "Sorry, you cannot use privileged ports on the host (below 1024) [80:80]",
})
}
for _, test := range tests {
t.Run(strings.Join(test.ports, ","), func(t *testing.T) {
gotError := ""
got := validatePorts(test.ports)
if got != nil {
gotError = got.Error()
}
if gotError != test.errorMsg {
t.Errorf("validatePorts(ports=%v): got %v, expected %v", test.ports, got, test.errorMsg)
}
})
}
}
|
[
"\"HTTP_PROXY\""
] |
[] |
[
"HTTP_PROXY"
] |
[]
|
["HTTP_PROXY"]
|
go
| 1 | 0 | |
klaviyo_test.go
|
package klaviyo
import (
"os"
"testing"
"time"
)
var (
testPersonId = os.Getenv("KlaviyoTestPersonId")
testListId = os.Getenv("KlaviyoTestListId")
)
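// These integration tests are configured entirely from the environment, e.g.
// (illustrative values):
//
//	export KlaviyoPublicKey=pub_xxx KlaviyoPrivateKey=pk_xxx
//	export KlaviyoTestPersonId=01ABC KlaviyoTestListId=Xy12z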
const (
attrIsTest = "IsTest"
attrLikesGold = "LikesGold"
)
func newTestClient() *Client {
return &Client{
PublicKey: os.Getenv("KlaviyoPublicKey"),
PrivateKey: os.Getenv("KlaviyoPrivateKey"),
DefaultTimeout: time.Second,
}
}
func TestClient_Identify(t *testing.T) {
client := newTestClient()
p := newTestPerson()
err := client.Identify(&p)
if err != nil {
t.Fatal(err)
}
}
func TestClient_GetPerson(t *testing.T) {
client := newTestClient()
p, err := client.GetPerson(testPersonId)
if err != nil {
t.Fatal(err)
}
if p == nil {
t.Fatal("Returned person was nil")
}
}
func TestClient_UpdatePerson(t *testing.T) {
client := newTestClient()
p, err := client.GetPerson(testPersonId)
if err != nil {
t.Fatal(err)
}
if p == nil {
t.Fatal("Returned person was nil")
}
t.Log("attr likes gold", p.Attributes[attrLikesGold])
likesGold := p.Attributes.ParseBool(attrLikesGold)
t.Log("parsed likes gold", likesGold)
likesGold = !likesGold
p.Attributes[attrLikesGold] = likesGold
err = client.UpdatePerson(p)
if err != nil {
t.Fatal(err)
}
// Verify update went through
b, err := client.GetPerson(p.Id)
if err != nil {
t.Fatal(err)
}
t.Log("b attr likes gold", b.Attributes[attrLikesGold])
if _, ok := b.Attributes[attrLikesGold]; !ok {
t.Fatalf("Did not find attribute %s", attrLikesGold)
} else if b.Attributes.ParseBool(attrLikesGold) != likesGold {
t.Fatalf("Attribute did not match for %s", attrLikesGold)
}
}
func TestClient_InList(t *testing.T) {
client := newTestClient()
p := newTestPerson()
// This checks to make sure the test user is in the test list
xs, err := client.InList(testListId, []string{p.Email}, nil, nil)
if err != nil {
t.Fatal(err)
}
if len(xs) != 1 {
t.Fatalf("Expected 1 ListPerson in array")
}
if xs[0].Email != p.Email {
t.Fatalf("Returned ListPerson.Email does not match input")
}
// This checks that a real user is not in the test list
xs, err = client.InList(testListId, []string{"[email protected]"}, nil, nil)
if err != nil {
t.Fatal(err)
}
if len(xs) != 0 {
t.Fatalf("User should not appear in the test list!")
}
}
// This test expects that your list is using single opt-in settings. Double opt-in will not return any results.
func TestClient_Subscribe(t *testing.T) {
email := "[email protected]"
client := newTestClient()
// TODO get list information on double opt-in status to adapt test checks
res, err := client.Subscribe(testListId, []string{email}, nil)
if err != nil {
t.Fatal(err)
}
if len(res) != 1 {
t.Fatal("Expected 1 result back from Subscribe call, please make sure that you are using single opt-in")
} else if res[0].Email != email {
t.Fatalf("Result email did not match input email")
}
}
func TestClient_Unsubscribe(t *testing.T) {
email := "[email protected]"
client := newTestClient()
err := client.Unsubscribe(testListId, []string{email}, nil, nil)
if err != nil {
t.Fatal(err)
}
}
|
[
"\"KlaviyoTestPersonId\"",
"\"KlaviyoTestListId\"",
"\"KlaviyoPublicKey\"",
"\"KlaviyoPrivateKey\""
] |
[] |
[
"KlaviyoTestPersonId",
"KlaviyoTestListId",
"KlaviyoPrivateKey",
"KlaviyoPublicKey"
] |
[]
|
["KlaviyoTestPersonId", "KlaviyoTestListId", "KlaviyoPrivateKey", "KlaviyoPublicKey"]
|
go
| 4 | 0 | |
cmd/root.go
|
package cmd
import (
"fmt"
"os"
"strings"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/fatih/color"
"github.com/marcosnils/bin/pkg/config"
"github.com/spf13/cobra"
)
func Execute(version string, exit func(int), args []string) {
// enable colored output on travis
if os.Getenv("CI") != "" {
color.NoColor = false
}
log.SetHandler(cli.Default)
// fmt.Println()
// defer fmt.Println()
newRootCmd(version, exit).Execute(args)
}
func (cmd *rootCmd) Execute(args []string) {
cmd.cmd.SetArgs(args)
if defaultCommand(cmd.cmd, args) {
cmd.cmd.SetArgs(append([]string{"list"}, args...))
}
if err := cmd.cmd.Execute(); err != nil {
code := 1
msg := "command failed"
if eerr, ok := err.(*exitError); ok {
code = eerr.code
if eerr.details != "" {
msg = eerr.details
}
}
log.WithError(err).Error(msg)
cmd.exit(code)
}
}
type rootCmd struct {
cmd *cobra.Command
debug bool
exit func(int)
}
func newRootCmd(version string, exit func(int)) *rootCmd {
root := &rootCmd{
exit: exit,
}
cmd := &cobra.Command{
Use: "bin",
Short: "Effortless binary manager",
Version: version,
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if root.debug {
log.SetLevel(log.DebugLevel)
log.Debug("debug logs enabled")
}
// check and load config after handlers are configured
err := config.CheckAndLoad()
if err != nil {
log.Fatalf("Error loading config file %v", err)
}
},
}
cmd.PersistentFlags().BoolVar(&root.debug, "debug", false, "Enable debug mode")
cmd.AddCommand(
newInstallCmd().cmd,
newEnsureCmd().cmd,
newUpdateCmd().cmd,
newRemoveCmd().cmd,
newListCmd().cmd,
newPruneCmd().cmd,
)
root.cmd = cmd
return root
}
func defaultCommand(cmd *cobra.Command, args []string) bool {
// find the current cmd; if it's not root, it means the user actively
// set a command, so let it go
xmd, _, _ := cmd.Find(args)
if xmd != cmd {
return false
}
// if we have != 1 args, assume it's a ls
if len(args) != 1 {
return true
}
// given that its 1, check if its one of the valid standalone flags
// for the root cmd
for _, s := range []string{"-h", "--help", "-v", "--version"} {
if s == args[0] {
// if it is, we should run the root cmd
return false
}
}
// otherwise, we should probably prepend ls
return true
}
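// For example, a bare `bin` or `bin some-binary` is rewritten by Execute to
// `bin list ...`, while `bin --help` and explicit subcommands run as given.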
func getBinPath(name string) (string, error) {
if strings.Contains(name, "/") {
return name, nil
}
cfg := config.Get()
for _, bin := range cfg.Bins {
if bin.RemoteName == name {
return bin.Path, nil
}
}
return "", fmt.Errorf("Binary path %s not found", name)
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
network_test.go
|
// +build all unittests
package infoblox
import (
"os"
"testing"
)
var (
networkConfig = Config{
Host: os.Getenv("INFOBLOX_HOST"),
Port: os.Getenv("INFOBLOX_PORT"),
Username: os.Getenv("INFOBLOX_USERNAME"),
Password: os.Getenv("INFOBLOX_PASSWORD"),
Version: os.Getenv("INFOBLOX_VERSION"),
DisableTLSVerification: true,
}
networkClient = New(networkConfig)
testNetwork = Network{
CIDR: "172.19.4.0/24",
NetworkView: "default",
Comment: "testing",
ExtensibleAttributes: newExtensibleAttribute(ExtensibleAttribute{
"Owner": ExtensibleAttributeValue{
Value: "testUser",
},
"Gateway": ExtensibleAttributeValue{
Value: "172.19.4.1",
},
}),
}
)
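// The client above is configured entirely from the environment, e.g.
// (illustrative values):
//
//	export INFOBLOX_HOST=infoblox.example.com INFOBLOX_PORT=443
//	export INFOBLOX_USERNAME=admin INFOBLOX_PASSWORD=secret INFOBLOX_VERSION=2.12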
func TestCreateNetwork(t *testing.T) {
err := networkClient.CreateNetwork(&testNetwork)
if err != nil {
t.Errorf("Error creating network: %s", err)
}
}
func TestGetNetwork(t *testing.T) {
network, err := networkClient.GetNetworkByRef(testNetwork.Ref, nil)
if err != nil {
t.Errorf("Error retrieving network: %s", err)
}
prettyPrint(network)
}
func TestUpdateNetwork(t *testing.T) {
updates := Network{
Comment: "testing2",
ExtensibleAttributes: newExtensibleAttribute(ExtensibleAttribute{
"Location": ExtensibleAttributeValue{
Value: "austin",
},
}),
}
network, err := networkClient.UpdateNetwork(testNetwork.Ref, updates)
if err != nil {
t.Errorf("Error retrieving network: %s", err)
}
eas := *network.ExtensibleAttributes
if eas["Location"].Value.(string) != "austin" {
t.Errorf("Error updating network. EA value does not match expected value")
}
prettyPrint(network)
testNetwork = network
}
func TestDeleteNetwork(t *testing.T) {
err := networkClient.DeleteNetwork(testNetwork.Ref)
if err != nil {
t.Errorf("Error deleting network: %s", err)
}
}
func TestLogoutNetwork(t *testing.T) {
err := networkClient.Logout()
if err != nil {
t.Errorf("Error logging out: %s", err)
}
}
|
[
"\"INFOBLOX_HOST\"",
"\"INFOBLOX_PORT\"",
"\"INFOBLOX_USERNAME\"",
"\"INFOBLOX_PASSWORD\"",
"\"INFOBLOX_VERSION\""
] |
[] |
[
"INFOBLOX_VERSION",
"INFOBLOX_USERNAME",
"INFOBLOX_PASSWORD",
"INFOBLOX_PORT",
"INFOBLOX_HOST"
] |
[]
|
["INFOBLOX_VERSION", "INFOBLOX_USERNAME", "INFOBLOX_PASSWORD", "INFOBLOX_PORT", "INFOBLOX_HOST"]
|
go
| 5 | 0 | |
cli/pkg/lookup/name_types.go
|
package lookup
import (
"regexp"
"github.com/rancher/rio/cli/pkg/types"
namer "github.com/rancher/rio/pkg/name"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
dns1035 string = "[a-z]([-a-z0-9]*[a-z0-9])?"
alphanumeric string = "[a-zA-Z0-9._-]*"
FullDomainNameTypeNameType = NameType("domainName")
SingleNameNameType = NameType("singleName")
NamespaceScopedNameType = NameType("stackScoped")
NamespacedSecretNameType = NameType("secretName")
ThreePartsNameType = NameType("threeParts")
VersionedSingleNameNameType = NameType("versionedSingleName")
VersionedStackScopedNameType = NameType("versionedStackScoped")
)
type NameType string
var (
nameTypes = map[NameType]nameType{
FullDomainNameTypeNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "\\." + dns1035 + ".*" + "$"),
lookup: resolveFullDomain,
},
SingleNameNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "$"),
lookup: resolveSingleName,
},
VersionedSingleNameNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + ":" + dns1035 + "$"),
lookup: resolveStackScoped,
},
NamespaceScopedNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "/" + dns1035 + "$"),
lookup: resolveStackScoped,
},
NamespacedSecretNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "/" + alphanumeric + "$"),
lookup: resolveStackScoped,
},
VersionedStackScopedNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "/" + dns1035 + ":" + alphanumeric + "$"),
lookup: resolveStackScoped,
},
ThreePartsNameType: {
Regexp: regexp.MustCompile("^" + dns1035 + "/" + dns1035 + "/" + dns1035 + "$"),
lookup: resolvePod,
},
}
)
type nameType struct {
types map[string]bool
Regexp *regexp.Regexp
lookup func(defaultStackName, name, typeName string) types.Resource
}
func (n nameType) Lookup(lookup ClientLookup, name, typeName string) (types.Resource, error) {
if !n.types[typeName] {
return types.Resource{}, errors.NewNotFound(schema.GroupResource{}, name)
}
var r types.Resource
switch typeName {
case types.FeatureType:
r = n.lookup(lookup.GetSystemNamespace(), name, typeName)
default:
namespace := lookup.GetSetNamespace()
if namespace == "" {
namespace = lookup.GetDefaultNamespace()
}
r = n.lookup(namespace, name, typeName)
}
r, err := lookup.ByID(r.Namespace, r.Name, typeName)
r.LookupName = name
return r, err
}
func (n nameType) Matches(name string) bool {
return n.Regexp.MatchString(name)
}
func RegisterType(typeName string, supportedNameTypes ...NameType) {
for _, nameType := range supportedNameTypes {
if nameTypes[nameType].types == nil {
t := nameTypes[nameType]
t.types = map[string]bool{}
nameTypes[nameType] = t
}
nameTypes[nameType].types[typeName] = true
}
}
func resolveFullDomain(defaultStackName, name, typeName string) types.Resource {
return types.Resource{
Namespace: defaultStackName,
Name: namer.PublicDomain(name),
Type: typeName,
}
}
func resolveSingleName(defaultStackName, name, typeName string) types.Resource {
return types.Resource{
Namespace: defaultStackName,
Name: name,
Type: typeName,
}
}
func resolveStackScoped(defaultStackName, name, typeName string) types.Resource {
stackScoped := ParseStackScoped(defaultStackName, name)
return types.Resource{
Namespace: stackScoped.StackName,
Name: stackScoped.ResourceName,
Type: typeName,
}
}
func resolvePod(defaultStackName, name, typeName string) types.Resource {
container, _ := ParseContainer(defaultStackName, name)
return types.Resource{
Namespace: container.Service.StackName,
Name: container.PodName + "/" + container.ContainerName,
Type: typeName,
}
}
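// Usage sketch (illustrative; "service" is a hypothetical type name): a type
// addressable both as a bare name and as "stack/name:version" would register
// itself with:
//
//	RegisterType("service", SingleNameNameType, VersionedStackScopedNameType)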
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
bundle/src/main/java/com/adobe/acs/commons/hc/impl/HealthCheckStatusEmailer.java
|
/*
* #%L
* ACS AEM Commons Bundle
* %%
* Copyright (C) 2017 Adobe
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.adobe.acs.commons.hc.impl;
import com.adobe.acs.commons.email.EmailService;
import com.adobe.acs.commons.util.ModeUtil;
import com.adobe.granite.license.ProductInfo;
import com.adobe.granite.license.ProductInfoService;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.ConfigurationPolicy;
import org.apache.felix.scr.annotations.Properties;
import org.apache.felix.scr.annotations.Property;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.Service;
import org.apache.sling.commons.osgi.PropertiesUtil;
import org.apache.sling.hc.api.execution.HealthCheckExecutionOptions;
import org.apache.sling.hc.api.execution.HealthCheckExecutionResult;
import org.apache.sling.hc.api.execution.HealthCheckExecutor;
import org.apache.sling.settings.SlingSettingsService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
@Component(
label = "ACS AEM Commons - Health Check Status E-mailer",
description = "Scheduled Service that runs specified Health Checks and e-mails the results",
configurationFactory = true,
policy = ConfigurationPolicy.REQUIRE,
metatype = true
)
@Properties({
@Property(
label = "Cron expression defining when this Scheduled Service will run",
name = "scheduler.expression",
description = "Every weekday @ 8am = [ 0 0 8 ? * MON-FRI * ] Visit www.cronmaker.com to generate cron expressions.",
value = "0 0 8 ? * MON-FRI *"
),
@Property(
name = "scheduler.concurrent",
boolValue = false,
propertyPrivate = true
),
@Property(
name = "scheduler.runOn",
value = "LEADER",
propertyPrivate = true
)
})
@Service(value = Runnable.class)
public class HealthCheckStatusEmailer implements Runnable {
private final Logger log = LoggerFactory.getLogger(HealthCheckStatusEmailer.class);
private static final int HEALTH_CHECK_STATUS_PADDING = 20;
private static final int NUM_DASHES = 100;
private Calendar nextEmailTime = Calendar.getInstance();
/* OSGi Properties */
private static final String DEFAULT_EMAIL_TEMPLATE_PATH = "/etc/notification/email/acs-commons/health-check-status-email.txt";
private String emailTemplatePath = DEFAULT_EMAIL_TEMPLATE_PATH;
@Property(label = "E-mail Template Path",
description = "The absolute JCR path to the e-mail template",
value = DEFAULT_EMAIL_TEMPLATE_PATH)
public static final String PROP_TEMPLATE_PATH = "email.template.path";
private static final String DEFAULT_EMAIL_SUBJECT_PREFIX = "AEM Health Check report";
private String emailSubject = DEFAULT_EMAIL_SUBJECT_PREFIX;
@Property(label = "E-mail Subject Prefix",
description = "The e-mail subject prefix. E-mail subject format is: <E-mail Subject Prefix> [ # Failures ] [ # Success ] [ <AEM Instance Name> ]",
value = DEFAULT_EMAIL_SUBJECT_PREFIX)
public static final String PROP_EMAIL_SUBJECT = "email.subject";
private static final boolean DEFAULT_SEND_EMAIL_ONLY_ON_FAILURE = true;
private boolean sendEmailOnlyOnFailure = DEFAULT_SEND_EMAIL_ONLY_ON_FAILURE;
@Property(label = "Send e-mail only on failure",
description = "If true, an e-mail is ONLY sent if at least 1 Health Check failure occurs. [ Default: true ]",
boolValue = DEFAULT_SEND_EMAIL_ONLY_ON_FAILURE)
public static final String PROP_SEND_EMAIL_ONLY_ON_FAILURE = "email.send-only-on-failure";
private static final String[] DEFAULT_RECIPIENT_EMAIL_ADDRESSES = new String[]{};
private String[] recipientEmailAddresses = DEFAULT_RECIPIENT_EMAIL_ADDRESSES;
@Property(label = "Recipient E-mail Addresses",
description = "A list of e-mail addresses to send this e-mail to.",
cardinality = Integer.MAX_VALUE,
value = {})
public static final String PROP_RECIPIENTS_EMAIL_ADDRESSES = "recipients.email-addresses";
private static final String[] DEFAULT_HEALTH_CHECK_TAGS = new String[]{"system"};
private String[] healthCheckTags = DEFAULT_HEALTH_CHECK_TAGS;
@Property(label = "Health Check Tags",
description = "The AEM Health Check Tag names to execute. [ Default: system ]",
cardinality = Integer.MAX_VALUE,
value = {"system"})
public static final String PROP_HEALTH_CHECK_TAGS = "hc.tags";
private static final int DEFAULT_HEALTH_CHECK_TIMEOUT_OVERRIDE = -1;
private int healthCheckTimeoutOverride = DEFAULT_HEALTH_CHECK_TIMEOUT_OVERRIDE;
@Property(label = "Health Check Timeout Override",
description = "The AEM Health Check timeout override in milliseconds. Set < 1 to disable. [ Default: -1 ]",
intValue = DEFAULT_HEALTH_CHECK_TIMEOUT_OVERRIDE)
public static final String PROP_HEALTH_CHECK_TIMEOUT_OVERRIDE = "hc.timeout.override";
private static final boolean DEFAULT_HEALTH_CHECK_TAGS_OPTIONS_OR = true;
private boolean healthCheckTagsOptionsOr = DEFAULT_HEALTH_CHECK_TAGS_OPTIONS_OR;
@Property(label = "'OR' Health Check Tags",
description = "When set to true, all Health Checks that are in any of the Health Check Tags (hc.tags) are executed. If false, then the Health Check must be in ALL of the Health Check tags (hc.tags). [ Default: true ]",
boolValue = DEFAULT_HEALTH_CHECK_TAGS_OPTIONS_OR)
public static final String PROP_HEALTH_CHECK_TAGS_OPTIONS_OR = "hc.tags.options.or";
private static final String DEFAULT_FALLBACK_HOSTNAME = "Unknown AEM Instance";
private String fallbackHostname = DEFAULT_FALLBACK_HOSTNAME;
@Property(label = "Hostname Fallback",
description = "The value used to identify this AEM instance if the programmatic hostname look-up fails to produce results..",
value = DEFAULT_FALLBACK_HOSTNAME)
public static final String PROP_FALLBACK_HOSTNAME = "hostname.fallback";
private static final int DEFAULT_THROTTLE_IN_MINS = 15;
private int throttleInMins = DEFAULT_THROTTLE_IN_MINS;
@Property(label = "Quiet Period in Minutes",
description = "Defines a time span that prevents this service from sending more than 1 e-mail per quiet period. This prevents e-mail spamming for frequent checks that only e-mail on failure. Default: [ 15 mins ]",
intValue = DEFAULT_THROTTLE_IN_MINS)
public static final String PROP_THROTTLE = "quiet.minutes";
@Property(
name = "webconsole.configurationFactory.nameHint",
value = "Health Check Status E-mailer running every [ {scheduler.expression} ] using Health Check Tags [ {hc.tags} ] to [ {recipients.email-addresses} ]"
)
@Reference
private ProductInfoService productInfoService;
@Reference
private SlingSettingsService slingSettingsService;
@Reference
private EmailService emailService;
@Reference
private HealthCheckExecutor healthCheckExecutor;
@Override
public final void run() {
log.trace("Executing ACS Commons Health Check E-mailer scheduled service");
final List<HealthCheckExecutionResult> success = new ArrayList<>();
final List<HealthCheckExecutionResult> failure = new ArrayList<>();
final long start = System.currentTimeMillis();
final HealthCheckExecutionOptions options = new HealthCheckExecutionOptions();
options.setForceInstantExecution(true);
options.setCombineTagsWithOr(healthCheckTagsOptionsOr);
if (healthCheckTimeoutOverride > 0) {
options.setOverrideGlobalTimeout(healthCheckTimeoutOverride);
}
final List<HealthCheckExecutionResult> results = healthCheckExecutor.execute(options, healthCheckTags);
log.debug("Obtained [ {} ] results for Health Check tags [ {} ]", results.size(), StringUtils.join(healthCheckTags, ", "));
for (HealthCheckExecutionResult result : results) {
if (result.getHealthCheckResult().isOk()) {
success.add(result);
} else {
failure.add(result);
}
}
final long timeTaken = System.currentTimeMillis() - start;
log.info("Executed ACS Commons Health Check E-mailer scheduled service in [ {} ms ]", timeTaken);
if (!sendEmailOnlyOnFailure || failure.size() > 0) {
if (nextEmailTime == null || Calendar.getInstance().after(nextEmailTime)) {
sendEmail(success, failure, timeTaken);
// Synchronize on the service itself; locking on nextEmailTime would be unsafe because the field is reassigned below.
synchronized (this) {
nextEmailTime = Calendar.getInstance();
nextEmailTime.add(Calendar.MINUTE, throttleInMins);
}
} else {
log.info("Did not send e-mail as it did not meet the e-mail throttle configured time of a [ {} ] minute quiet period. Next valid time to e-mail is [ {} ]", throttleInMins, nextEmailTime.getTime());
}
} else {
log.debug("Declining to send e-mail notification of 100% successful Health Check execution due to configuration.");
}
}
/**
* Creates the e-mail template parameter map and invokes the OSGi E-Mail Service.
*
* @param success the list of successful Health Check Execution Results
* @param failure the list of unsuccessful Health Check Execution Results
* @param timeTaken the time taken to execute all Health Checks
*/
protected final void sendEmail(final List<HealthCheckExecutionResult> success, final List<HealthCheckExecutionResult> failure, final long timeTaken) {
final ProductInfo[] productInfos = productInfoService.getInfos();
final String hostname = getHostname();
final Map<String, String> emailParams = new HashMap<>();
emailParams.put("subject", String.format("%s [ %d Failures ] [ %d Success ] [ %s ]", emailSubject, failure.size(), success.size(), hostname));
emailParams.put("failure", resultToPlainText("Failing Health Checks", failure));
emailParams.put("success", resultToPlainText("Successful Health Checks", success));
emailParams.put("executedAt", Calendar.getInstance().getTime().toString());
emailParams.put("runModes", StringUtils.join(slingSettingsService.getRunModes(), ", "));
emailParams.put("mode", ModeUtil.isAuthor() ? "Author" : "Publish");
emailParams.put("hostname", hostname);
emailParams.put("timeTaken", String.valueOf(timeTaken));
if (productInfos.length == 1) {
emailParams.put("productName", productInfos[0].getShortName());
emailParams.put("productVersion", productInfos[0].getShortVersion());
}
emailParams.put("successCount", String.valueOf(success.size()));
emailParams.put("failureCount", String.valueOf(failure.size()));
emailParams.put("totalCount", String.valueOf(failure.size() + success.size()));
if (ArrayUtils.isNotEmpty(recipientEmailAddresses)) {
final List<String> failureList = emailService.sendEmail(emailTemplatePath, emailParams, recipientEmailAddresses);
if (failureList.size() > 0) {
log.warn("Could not send health status check e-mails to recipients [ {} ]", StringUtils.join(failureList, ", "));
} else {
log.info("Successfully sent Health Check email to [ {} ] recipients", recipientEmailAddresses.length - failureList.size());
}
} else {
log.warn("No e-mail addresses provided to e-mail results of health checks. Either add the appropriate e-mail recipients or remove the health check status e-mail configuration entirely.");
}
}
/**
* Generates the plain-text email sections for sets of Health Check Execution Results.
*
* @param title The section title
* @param results the Health Check Execution Results to render as plain text
* @return the String for this section to be embedded in the e-mail
*/
protected String resultToPlainText(final String title, final List<HealthCheckExecutionResult> results) {
final StringBuilder sb = new StringBuilder();
sb.append(title);
sb.append(System.lineSeparator());
if (results.size() == 0) {
sb.append("No " + StringUtils.lowerCase(title) + " could be found!");
sb.append(System.lineSeparator());
} else {
sb.append(StringUtils.repeat("-", NUM_DASHES));
sb.append(System.lineSeparator());
for (final HealthCheckExecutionResult result : results) {
sb.append(StringUtils.rightPad("[ " + result.getHealthCheckResult().getStatus().name() + " ]", HEALTH_CHECK_STATUS_PADDING));
sb.append(" ");
sb.append(result.getHealthCheckMetadata().getTitle());
sb.append(System.lineSeparator());
}
}
return sb.toString();
}
/**
* OSGi Activate method.
*
* @param config the OSGi config params
*/
@Activate
protected final void activate(final Map<String, Object> config) {
emailTemplatePath = PropertiesUtil.toString(config.get(PROP_TEMPLATE_PATH), DEFAULT_EMAIL_TEMPLATE_PATH);
emailSubject = PropertiesUtil.toString(config.get(PROP_EMAIL_SUBJECT), DEFAULT_EMAIL_SUBJECT_PREFIX);
fallbackHostname = PropertiesUtil.toString(config.get(PROP_FALLBACK_HOSTNAME), DEFAULT_FALLBACK_HOSTNAME);
recipientEmailAddresses = PropertiesUtil.toStringArray(config.get(PROP_RECIPIENTS_EMAIL_ADDRESSES), DEFAULT_RECIPIENT_EMAIL_ADDRESSES);
healthCheckTags = PropertiesUtil.toStringArray(config.get(PROP_HEALTH_CHECK_TAGS), DEFAULT_HEALTH_CHECK_TAGS);
healthCheckTagsOptionsOr = PropertiesUtil.toBoolean(config.get(PROP_HEALTH_CHECK_TAGS_OPTIONS_OR), DEFAULT_HEALTH_CHECK_TAGS_OPTIONS_OR);
sendEmailOnlyOnFailure = PropertiesUtil.toBoolean(config.get(PROP_SEND_EMAIL_ONLY_ON_FAILURE), DEFAULT_SEND_EMAIL_ONLY_ON_FAILURE);
throttleInMins = PropertiesUtil.toInteger(config.get(PROP_THROTTLE), DEFAULT_THROTTLE_IN_MINS);
if (throttleInMins < 0) {
throttleInMins = DEFAULT_THROTTLE_IN_MINS;
}
healthCheckTimeoutOverride = PropertiesUtil.toInteger(config.get(PROP_HEALTH_CHECK_TIMEOUT_OVERRIDE), DEFAULT_HEALTH_CHECK_TIMEOUT_OVERRIDE);
}
/**
* Hostname retrieval code borrowed from Malt on StackOverflow
* > https://stackoverflow.com/questions/7348711/recommended-way-to-get-hostname-in-java
     */
/**
* Attempts to get the hostname of running AEM instance. Uses the OSGi configured fallback if unavailable.
*
* @return the AEM Instance's hostname.
*/
private String getHostname() {
String hostname = null;
final String OS = System.getProperty("os.name").toLowerCase();
        // Unpleasant 'if structure' to avoid making unnecessary Runtime calls; only call Runtime as a last resort.
if (OS.indexOf("win") >= 0) {
hostname = System.getenv("COMPUTERNAME");
if (StringUtils.isBlank(hostname)) {
try {
hostname = execReadToString("hostname");
} catch (IOException ex) {
log.warn("Unable to collect hostname from Windows via 'hostname' command.", ex);
}
}
} else if (OS.indexOf("nix") >= 0 || OS.indexOf("nux") >= 0 || OS.indexOf("mac") >= 0) {
hostname = System.getenv("HOSTNAME");
if (StringUtils.isBlank(hostname)) {
try {
hostname = execReadToString("hostname");
} catch (IOException ex) {
log.warn("Unable to collect hostname from *nix via 'hostname' command.", ex);
}
}
if (StringUtils.isBlank(hostname)) {
try {
                hostname = execReadToString("cat /etc/hostname");
} catch (IOException ex) {
log.warn("Unable to collect hostname from *nix via 'cat /etc/hostname' command.", ex);
}
}
} else {
log.warn("Unidentifiable OS [ {} ]. Could not collect hostname.", OS);
}
hostname = StringUtils.trimToNull(hostname);
if (StringUtils.isBlank(hostname)) {
log.debug("Unable to derive hostname from OS; defaulting to OSGi Configured value [ {} ]", fallbackHostname);
return fallbackHostname;
} else {
log.debug("Derived hostname from OS: [ {} ]", hostname);
return hostname;
}
}
/**
* Execute a command in the system's runtime.
*
* @param execCommand the command to execute in the Runtime
* @return the result of the command
     * @throws IOException if the command cannot be executed or its output cannot be read
*/
private String execReadToString(String execCommand) throws IOException {
Process proc = Runtime.getRuntime().exec(execCommand);
try (InputStream stream = proc.getInputStream()) {
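            // Scanner with delimiter "\A" (start of input) consumes the entire
            // stream as a single token, yielding the command's full output.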
try (Scanner s = new Scanner(stream).useDelimiter("\\A")) {
return s.hasNext() ? s.next() : "";
}
}
}
}
|
[
"\"COMPUTERNAME\"",
"\"HOSTNAME\""
] |
[] |
[
"COMPUTERNAME",
"HOSTNAME"
] |
[]
|
["COMPUTERNAME", "HOSTNAME"]
|
java
| 2 | 0 | |
lib/galaxy/tool_util/deps/brew_exts.py
|
#!/usr/bin/env python
# % brew vinstall samtools 1.0
# % brew vinstall samtools 0.1.19
# % brew vinstall samtools 1.1
# % brew env samtools 1.1
# PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/bin:/home/john/.linuxbrew/Cellar/samtools/1.1/bin:$PATH
# export PATH
# LD_LIBRARY_PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/lib:/home/john/.linuxbrew/Cellar/samtools/1.1/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH
# % . <(brew env samtools 1.1)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/1.1/bin/samtools
# % . <(brew env samtools 0.1.19)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/0.1.19/bin/samtools
# % brew vuninstall samtools 1.0
# % brew vdeps samtools 1.1
# htslib@1.1
# % brew vdeps samtools 0.1.19
import argparse
import contextlib
import glob
import json
import os
import re
import string
import subprocess
import sys
WHITESPACE_PATTERN = re.compile(r"[\s]+")
DESCRIPTION = "Script built on top of linuxbrew to operate on isolated, versioned brew installed environments."
if sys.platform == "darwin":
DEFAULT_HOMEBREW_ROOT = "/usr/local"
else:
DEFAULT_HOMEBREW_ROOT = os.path.join(os.path.expanduser("~"), ".linuxbrew")
NO_BREW_ERROR_MESSAGE = "Could not find brew on PATH, please place on path or pass to script with --brew argument."
CANNOT_DETERMINE_TAP_ERROR_MESSAGE = "Cannot determine tap of specified recipe - please use fully qualified recipe (e.g. homebrew/science/samtools)."
VERBOSE = False
RELAXED = False
BREW_ARGS = []
class BrewContext:
def __init__(self, args=None):
ensure_brew_on_path(args)
raw_config = brew_execute(["config"])
config_lines = [l.strip().split(":", 1) for l in raw_config.split("\n") if l]
config = {p[0].strip(): p[1].strip() for p in config_lines}
# unset if "/usr/local" -> https://github.com/Homebrew/homebrew/blob/master/Library/Homebrew/cmd/config.rb
homebrew_prefix = config.get("HOMEBREW_PREFIX", "/usr/local")
homebrew_cellar = config.get("HOMEBREW_CELLAR", os.path.join(homebrew_prefix, "Cellar"))
self.homebrew_prefix = homebrew_prefix
self.homebrew_cellar = homebrew_cellar
class RecipeContext:
@staticmethod
def from_args(args, brew_context=None):
return RecipeContext(args.recipe, args.version, brew_context)
def __init__(self, recipe, version, brew_context=None):
self.recipe = recipe
self.version = version
self.brew_context = brew_context or BrewContext()
@property
def cellar_path(self):
return recipe_cellar_path(self.brew_context.homebrew_cellar, self.recipe, self.version)
@property
def tap_path(self):
return os.path.join(self.brew_context.homebrew_prefix, "Library", "Taps", self.__tap_path(self.recipe))
def __tap_path(self, recipe):
parts = recipe.split("/")
if len(parts) == 1:
info = brew_info(self.recipe)
from_url = info["from_url"]
if not from_url:
raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
from_url_parts = from_url.split("/")
blob_index = from_url_parts.index("blob") # comes right after username and repository
if blob_index < 2:
raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
username = from_url_parts[blob_index - 2]
repository = from_url_parts[blob_index - 1]
else:
            assert len(parts) == 3
username = parts[0]
repository = "homebrew-%s" % parts[1]
path = os.path.join(username, repository)
return path
def main():
global VERBOSE
global RELAXED
global BREW_ARGS
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("--brew", help="Path to linuxbrew 'brew' executable to target")
actions = ["vinstall", "vuninstall", "vdeps", "vinfo", "env"]
action = __action(sys)
if not action:
parser.add_argument('action', metavar='action', help="Versioned action to perform.", choices=actions)
parser.add_argument('recipe', metavar='recipe', help="Recipe for action - should be absolute (e.g. homebrew/science/samtools).")
parser.add_argument('version', metavar='version', help="Version for action (e.g. 0.1.19).")
parser.add_argument('--relaxed', action='store_true', help="Relaxed processing - for instance allow use of env on non-vinstall-ed recipes.")
parser.add_argument('--verbose', action='store_true', help="Verbose output")
parser.add_argument('restargs', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.verbose:
VERBOSE = True
if args.relaxed:
RELAXED = True
BREW_ARGS = args.restargs
if not action:
action = args.action
brew_context = BrewContext(args)
recipe_context = RecipeContext.from_args(args, brew_context)
if action == "vinstall":
versioned_install(recipe_context, args.recipe, args.version)
elif action == "vuninstall":
brew_execute(["switch", args.recipe, args.version])
brew_execute(["uninstall", args.recipe])
elif action == "vdeps":
print_versioned_deps(recipe_context, args.recipe, args.version)
elif action == "env":
env_statements = build_env_statements_from_recipe_context(recipe_context)
print(env_statements)
elif action == "vinfo":
with brew_head_at_version(recipe_context, args.recipe, args.version):
print(brew_info(args.recipe))
else:
raise NotImplementedError()
class CommandLineException(Exception):
def __init__(self, command, stdout, stderr):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.message = ("Failed to execute command-line %s, stderr was:\n"
"-------->>begin stderr<<--------\n"
"%s\n"
"-------->>end stderr<<--------\n"
"-------->>begin stdout<<--------\n"
"%s\n"
"-------->>end stdout<<--------\n"
) % (command, stderr, stdout)
def __str__(self):
return self.message
def versioned_install(recipe_context, package=None, version=None, installed_deps=[]):
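    # NB: the mutable default for installed_deps is shared across calls; the
    # recursive call below relies on this to avoid re-installing the same
    # dependency twice within one process.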
if package is None:
package = recipe_context.recipe
version = recipe_context.version
attempt_unlink(package)
with brew_head_at_version(recipe_context, package, version):
deps = brew_deps(package)
deps_metadata = []
        dep_to_version = {}
        dep_to_versioned = {}
        for dep in deps:
            version_info = brew_versions_info(dep, recipe_context.tap_path)[0]
            dep_version = version_info[0]
            versioned = version_info[2]
            # Track whether each dep is versioned so the metadata written
            # below records the right flag per dependency.
            dep_to_versioned[dep] = versioned
            if versioned:
                dep_to_version[dep] = dep_version
                if dep in installed_deps:
                    continue
                versioned_install(recipe_context, dep, dep_version)
                installed_deps.append(dep)
            else:
                # Install latest.
                dep_to_version[dep] = None
                if dep in installed_deps:
                    continue
                unversioned_install(dep)
try:
for dep in deps:
dep_version = dep_to_version[dep]
if dep_version:
brew_execute(["switch", dep, dep_version])
else:
brew_execute(["link", dep])
# dep_version obtained from brew versions doesn't
# include revision. This linked_keg attribute does.
                keg_version = brew_info(dep)["linked_keg"]
                dep_metadata = {
                    'name': dep,
                    'version': keg_version,
                    'versioned': dep_to_versioned[dep]
}
deps_metadata.append(dep_metadata)
cellar_root = recipe_context.brew_context.homebrew_cellar
cellar_path = recipe_context.cellar_path
env_actions = build_env_actions(deps_metadata, cellar_root, cellar_path, custom_only=True)
env = EnvAction.build_env(env_actions)
args = ["install"]
if VERBOSE:
args.append("--verbose")
args.extend(BREW_ARGS)
args.append(package)
brew_execute(args, env=env)
deps = brew_execute(["deps", package])
deps = [d.strip() for d in deps.split("\n") if d]
metadata = {
'deps': deps_metadata
}
cellar_root = recipe_context.brew_context.homebrew_cellar
cellar_path = recipe_cellar_path(cellar_root, package, version)
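            # Persist the versioned dependency metadata alongside Homebrew's
            # own install receipt so later `env`/`vdeps` invocations can
            # rebuild the environment for this keg.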
v_metadata_path = os.path.join(cellar_path, "INSTALL_RECEIPT_VERSIONED.json")
with open(v_metadata_path, "w") as f:
json.dump(metadata, f)
finally:
attempt_unlink_all(package, deps)
def commit_for_version(recipe_context, package, version):
tap_path = recipe_context.tap_path
commit = None
with brew_head_at_commit("master", tap_path):
version_to_commit = brew_versions_info(package, tap_path)
if version is None:
version = version_to_commit[0][0]
commit = version_to_commit[0][1]
else:
for mapping in version_to_commit:
if mapping[0] == version:
commit = mapping[1]
if commit is None:
raise Exception("Failed to find commit for version %s" % version)
return commit
def print_versioned_deps(recipe_context, recipe, version):
deps = load_versioned_deps(recipe_context.cellar_path)
for dep in deps:
val = dep['name']
if dep['versioned']:
val += "@%s" % dep['version']
print(val)
def load_versioned_deps(cellar_path, relaxed=None):
if relaxed is None:
relaxed = RELAXED
v_metadata_path = os.path.join(cellar_path, "INSTALL_RECEIPT_VERSIONED.json")
if not os.path.isfile(v_metadata_path):
        if relaxed:
return []
else:
raise OSError(f"Could not locate versioned receipt file: {v_metadata_path}")
with open(v_metadata_path) as f:
metadata = json.load(f)
return metadata['deps']
def unversioned_install(package):
    deps = []
    try:
        deps = brew_deps(package)
for dep in deps:
brew_execute(["link", dep])
brew_execute(["install", package])
finally:
attempt_unlink_all(package, deps)
def attempt_unlink_all(package, deps):
for dep in deps:
attempt_unlink(dep)
attempt_unlink(package)
def attempt_unlink(package):
try:
brew_execute(["unlink", package])
except Exception:
# TODO: warn
pass
def brew_execute(args, env=None):
os.environ["HOMEBREW_NO_EMOJI"] = "1" # simplify brew parsing.
cmds = ["brew"] + args
return execute(cmds, env=env)
def build_env_statements_from_recipe_context(recipe_context, **kwds):
cellar_root = recipe_context.brew_context.homebrew_cellar
env_statements = build_env_statements(cellar_root, recipe_context.cellar_path, **kwds)
return env_statements
def build_env_statements(cellar_root, cellar_path, relaxed=None, custom_only=False):
deps = load_versioned_deps(cellar_path, relaxed=relaxed)
actions = build_env_actions(deps, cellar_root, cellar_path, relaxed, custom_only)
env_statements = []
for action in actions:
env_statements.extend(action.to_statements())
return "\n".join(env_statements)
def build_env_actions(deps, cellar_root, cellar_path, relaxed=None, custom_only=False):
path_appends = []
ld_path_appends = []
actions = []
def handle_keg(cellar_path):
bin_path = os.path.join(cellar_path, "bin")
if os.path.isdir(bin_path):
path_appends.append(bin_path)
lib_path = os.path.join(cellar_path, "lib")
if os.path.isdir(lib_path):
ld_path_appends.append(lib_path)
env_path = os.path.join(cellar_path, "platform_environment.json")
if os.path.exists(env_path):
with open(env_path) as f:
env_metadata = json.load(f)
if "actions" in env_metadata:
def to_action(desc):
return EnvAction(cellar_path, desc)
actions.extend(map(to_action, env_metadata["actions"]))
for dep in deps:
package = dep['name']
version = dep['version']
dep_cellar_path = recipe_cellar_path(cellar_root, package, version)
handle_keg(dep_cellar_path)
handle_keg(cellar_path)
if not custom_only:
if path_appends:
actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "PATH", "value": ":".join(path_appends)}))
if ld_path_appends:
            actions.append(EnvAction(cellar_path, {"action": "prepend", "variable": "LD_LIBRARY_PATH", "value": ":".join(ld_path_appends)}))
return actions
class EnvAction:
def __init__(self, keg_root, action_description):
self.variable = action_description["variable"]
self.action = action_description["action"]
self.value = string.Template(action_description["value"]).safe_substitute({
'KEG_ROOT': keg_root,
})
@staticmethod
def build_env(env_actions):
new_env = os.environ.copy()
        for env_action in env_actions:
            env_action.modify_environ(new_env)
return new_env
def modify_environ(self, environ):
if self.action == "set" or not environ.get(self.variable, ""):
environ[self.variable] = self.__eval("${value}")
elif self.action == "prepend":
environ[self.variable] = self.__eval("${value}:%s" % environ[self.variable])
else:
environ[self.variable] = self.__eval("%s:${value}" % environ[self.variable])
def __eval(self, template):
return string.Template(template).safe_substitute(
variable=self.variable,
value=self.value,
)
def to_statements(self):
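        # string.Template treats "$$" as an escaped literal "$", so the
        # emitted statement references the existing shell variable, e.g.
        # PATH="new:$PATH".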
if self.action == "set":
template = '''${variable}="${value}"'''
elif self.action == "prepend":
template = '''${variable}="${value}:$$${variable}"'''
else:
template = '''${variable}="$$${variable}:${value}"'''
return [
self.__eval(template),
"export %s" % self.variable
]
@contextlib.contextmanager
def brew_head_at_version(recipe_context, package, version):
commit = commit_for_version(recipe_context, package, version)
tap_path = recipe_context.tap_path
with brew_head_at_commit(commit, tap_path):
yield
@contextlib.contextmanager
def brew_head_at_commit(commit, tap_path):
try:
os.chdir(tap_path)
current_commit = git_execute(["rev-parse", "HEAD"]).strip()
try:
git_execute(["checkout", commit])
yield
finally:
git_execute(["checkout", current_commit])
finally:
# TODO: restore chdir - or better yet just don't chdir
# shouldn't be needed.
pass
def git_execute(args):
cmds = ["git"] + args
return execute(cmds)
def execute(cmds, env=None):
    subprocess_kwds = dict(
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,  # decode stdout/stderr to str for parsing
    )
if env:
subprocess_kwds["env"] = env
p = subprocess.Popen(cmds, **subprocess_kwds)
# log = p.stdout.read()
global VERBOSE
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CommandLineException(" ".join(cmds), stdout, stderr)
if VERBOSE:
print(stdout)
return stdout
def brew_deps(package):
args = ["deps"]
args.extend(BREW_ARGS)
args.append(package)
stdout = brew_execute(args)
return [p.strip() for p in stdout.split("\n") if p]
def brew_info(recipe):
info_json = brew_execute(["info", "--json=v1", recipe])
info = json.loads(info_json)[0]
info.update(extended_brew_info(recipe))
return info
def extended_brew_info(recipe):
# Extract more info from non-json variant. JSON variant should
# include this in a backward compatible way (TODO: Open PR).
raw_info = brew_execute(["info", recipe])
extra_info = dict(
from_url=None,
build_dependencies=[],
required_dependencies=[],
recommended_dependencies=[],
optional_dependencies=[],
)
for line in raw_info.split("\n"):
if line.startswith("From: "):
extra_info["from_url"] = line[len("From: "):].strip()
for dep_type in ["Build", "Required", "Recommended", "Optional"]:
if line.startswith("%s: " % dep_type):
key = "%s_dependencies" % dep_type.lower()
raw_val = line[len("%s: " % dep_type):]
extra_info[key].extend(raw_val.split(", "))
return extra_info
def brew_versions_info(package, tap_path):
def versioned(recipe_path):
if not os.path.isabs(recipe_path):
recipe_path = os.path.join(os.getcwd(), recipe_path)
# Dependencies in the same repository should be versioned,
# core dependencies (presumably in base homebrew) are not
# versioned.
return tap_path in recipe_path
# TODO: Also use tags.
stdout = brew_execute(["versions", package])
version_parts = [l for l in stdout.split("\n") if l and "git checkout" in l]
version_parts = map(lambda l: WHITESPACE_PATTERN.split(l), version_parts)
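    # Each matching line looks like "<version> git checkout <commit> <path>",
    # so after whitespace-splitting p[0] is the version, p[3] the commit and
    # p[4] the recipe path (format assumed from `brew versions` output).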
info = [(p[0], p[3], versioned(p[4])) for p in version_parts]
return info
def __action(sys):
script_name = os.path.basename(sys.argv[0])
if script_name.startswith("brew-"):
return script_name[len("brew-"):]
else:
return None
def recipe_cellar_path(cellar_path, recipe, version):
recipe_base = recipe.split("/")[-1]
recipe_base_path = os.path.join(cellar_path, recipe_base, version)
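    # Homebrew suffixes a keg directory with "_<revision>" when a formula
    # revision is bumped without a version change; prefer the highest one.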
revision_paths = glob.glob(recipe_base_path + "_*")
if revision_paths:
revisions = map(lambda x: int(x.rsplit("_", 1)[-1]), revision_paths)
max_revision = max(revisions)
recipe_path = "%s_%d" % (recipe_base_path, max_revision)
else:
recipe_path = recipe_base_path
return recipe_path
def ensure_brew_on_path(args):
brew_on_path = which("brew")
if brew_on_path:
brew_on_path = os.path.abspath(brew_on_path)
def ensure_on_path(brew):
if brew != brew_on_path:
os.environ["PATH"] = "{}:{}".format(os.path.dirname(brew), os.environ["PATH"])
default_brew_path = os.path.join(DEFAULT_HOMEBREW_ROOT, "bin", "brew")
if args and args.brew:
user_brew_path = os.path.abspath(args.brew)
ensure_on_path(user_brew_path)
elif brew_on_path:
return brew_on_path
elif os.path.exists(default_brew_path):
ensure_on_path(default_brew_path)
else:
raise Exception(NO_BREW_ERROR_MESSAGE)
def which(file):
# http://stackoverflow.com/questions/5226958/which-equivalent-function-in-python
for path in os.environ["PATH"].split(":"):
if os.path.exists(path + "/" + file):
return path + "/" + file
return None
if __name__ == "__main__":
main()
|
[] |
[] |
[
"HOMEBREW_NO_EMOJI",
"PATH"
] |
[]
|
["HOMEBREW_NO_EMOJI", "PATH"]
|
python
| 2 | 0 | |
vendor/github.com/getsentry/sentry-go/client.go
|
package sentry
import (
"context"
"crypto/x509"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/getsentry/sentry-go/internal/debug"
)
// maxErrorDepth is the maximum number of errors reported in a chain of errors.
// This protects the SDK from an arbitrarily long chain of wrapped errors.
//
// An additional consideration is that arguably reporting a long chain of errors
// is of little use when debugging production errors with Sentry. The Sentry UI
// is not optimized for long chains either. The top-level error together with a
// stack trace is often the most useful information.
const maxErrorDepth = 10
// hostname is the host name reported by the kernel. It is precomputed once to
// avoid syscalls when capturing events.
//
// The error is ignored because retrieving the host name is best-effort. If the
// error is non-nil, there is nothing to do other than retrying. We choose not
// to retry for now.
var hostname, _ = os.Hostname()
// lockedRand is a random number generator safe for concurrent use. Its API is
// intentionally limited and it is not meant as a full replacement for a
// rand.Rand.
type lockedRand struct {
mu sync.Mutex
r *rand.Rand
}
// Float64 returns a pseudo-random number in [0.0,1.0).
func (r *lockedRand) Float64() float64 {
r.mu.Lock()
defer r.mu.Unlock()
return r.r.Float64()
}
// rng is the internal random number generator.
//
// We do not use the global functions from math/rand because, while they are
// safe for concurrent use, any package in a build could change the seed and
// affect the generated numbers, for instance making them deterministic. On the
// other hand, the source returned from rand.NewSource is not safe for
// concurrent use, so we need to couple its use with a sync.Mutex.
var rng = &lockedRand{
r: rand.New(rand.NewSource(time.Now().UnixNano())),
}
// usageError is used to report to Sentry an SDK usage error.
//
// It is not exported because it is never returned by any function or method in
// the exported API.
type usageError struct {
error
}
// Logger is an instance of log.Logger that is used to provide debug information about the running Sentry client.
// It can be enabled either by using Logger.SetOutput directly or with the Debug client option.
var Logger = log.New(ioutil.Discard, "[Sentry] ", log.LstdFlags)
// EventProcessor is a function that processes an event.
// Event processors are used to change an event before it is sent to Sentry.
type EventProcessor func(event *Event, hint *EventHint) *Event
// EventModifier is the interface that wraps the ApplyToEvent method.
//
// ApplyToEvent changes an event based on external data and/or
// an event hint.
type EventModifier interface {
ApplyToEvent(event *Event, hint *EventHint) *Event
}
var globalEventProcessors []EventProcessor
// AddGlobalEventProcessor adds processor to the global list of event
// processors. Global event processors apply to all events.
//
// AddGlobalEventProcessor is deprecated. Most users will prefer to initialize
// the SDK with Init and provide a ClientOptions.BeforeSend function or use
// Scope.AddEventProcessor instead.
func AddGlobalEventProcessor(processor EventProcessor) {
globalEventProcessors = append(globalEventProcessors, processor)
}
// Integration allows for registering functions that modify or discard captured events.
type Integration interface {
Name() string
SetupOnce(client *Client)
}
// ClientOptions configures an SDK Client.
type ClientOptions struct {
// The DSN to use. If the DSN is not set, the client is effectively
// disabled.
Dsn string
	// In debug mode, debug information is printed to DebugWriter (os.Stderr
	// by default) to help you understand what the SDK is doing.
Debug bool
	// Configures whether the SDK should generate and attach stacktraces to
	// pure capture message calls.
AttachStacktrace bool
// The sample rate for event submission in the range [0.0, 1.0]. By default,
// all events are sent. Thus, as a historical special case, the sample rate
// 0.0 is treated as if it was 1.0. To drop all events, set the DSN to the
// empty string.
SampleRate float64
// The sample rate for sampling traces in the range [0.0, 1.0].
TracesSampleRate float64
// Used to customize the sampling of traces, overrides TracesSampleRate.
TracesSampler TracesSampler
	// List of regexp strings that will be used to match against the event's
	// message and, if applicable, the caught error's type and value.
	// If a match is found, the whole event is dropped.
IgnoreErrors []string
// BeforeSend is called before error events are sent to Sentry.
// Use it to mutate the event or return nil to discard the event.
// See EventProcessor if you need to mutate transactions.
BeforeSend func(event *Event, hint *EventHint) *Event
// Before breadcrumb add callback.
BeforeBreadcrumb func(breadcrumb *Breadcrumb, hint *BreadcrumbHint) *Breadcrumb
// Integrations to be installed on the current Client, receives default
// integrations.
Integrations func([]Integration) []Integration
// io.Writer implementation that should be used with the Debug mode.
DebugWriter io.Writer
// The transport to use. Defaults to HTTPTransport.
Transport Transport
// The server name to be reported.
ServerName string
// The release to be sent with events.
//
// Some Sentry features are built around releases, and, thus, reporting
// events with a non-empty release improves the product experience. See
// https://docs.sentry.io/product/releases/.
//
// If Release is not set, the SDK will try to derive a default value
// from environment variables or the Git repository in the working
// directory.
//
// If you distribute a compiled binary, it is recommended to set the
// Release value explicitly at build time. As an example, you can use:
//
// go build -ldflags='-X main.release=VALUE'
//
// That will set the value of a predeclared variable 'release' in the
// 'main' package to 'VALUE'. Then, use that variable when initializing
// the SDK:
//
// sentry.Init(ClientOptions{Release: release})
//
// See https://golang.org/cmd/go/ and https://golang.org/cmd/link/ for
// the official documentation of -ldflags and -X, respectively.
Release string
// The dist to be sent with events.
Dist string
// The environment to be sent with events.
Environment string
// Maximum number of breadcrumbs.
MaxBreadcrumbs int
	// An optional pointer to http.Client that will be used with a default
	// HTTPTransport. Using your own client will cause the HTTPTransport,
	// HTTPProxy, HTTPSProxy and CaCerts options to be ignored.
HTTPClient *http.Client
	// An optional pointer to http.Transport that will be used with a default
	// HTTPTransport. Using your own transport will cause the HTTPProxy,
	// HTTPSProxy and CaCerts options to be ignored.
HTTPTransport http.RoundTripper
// An optional HTTP proxy to use.
// This will default to the HTTP_PROXY environment variable.
HTTPProxy string
// An optional HTTPS proxy to use.
// This will default to the HTTPS_PROXY environment variable.
// HTTPS_PROXY takes precedence over HTTP_PROXY for https requests.
HTTPSProxy string
// An optional set of SSL certificates to use.
CaCerts *x509.CertPool
}
// Client is the underlying processor that is used by the main API and Hub
// instances. It must be created with NewClient.
type Client struct {
options ClientOptions
dsn *Dsn
eventProcessors []EventProcessor
integrations []Integration
// Transport is read-only. Replacing the transport of an existing client is
// not supported, create a new client instead.
Transport Transport
}
// NewClient creates and returns an instance of Client configured using
// ClientOptions.
//
// Most users will not create clients directly. Instead, initialize the SDK with
// Init and use the package-level functions (for simple programs that run on a
// single goroutine) or hub methods (for concurrent programs, for example web
// servers).
func NewClient(options ClientOptions) (*Client, error) {
if options.TracesSampleRate != 0.0 && options.TracesSampler != nil {
return nil, errors.New("TracesSampleRate and TracesSampler are mutually exclusive")
}
if options.Debug {
debugWriter := options.DebugWriter
if debugWriter == nil {
debugWriter = os.Stderr
}
Logger.SetOutput(debugWriter)
}
if options.Dsn == "" {
options.Dsn = os.Getenv("SENTRY_DSN")
}
if options.Release == "" {
options.Release = defaultRelease()
}
if options.Environment == "" {
options.Environment = os.Getenv("SENTRY_ENVIRONMENT")
}
// SENTRYGODEBUG is a comma-separated list of key=value pairs (similar
// to GODEBUG). It is not a supported feature: recognized debug options
// may change any time.
//
// The intended public is SDK developers. It is orthogonal to
// options.Debug, which is also available for SDK users.
dbg := strings.Split(os.Getenv("SENTRYGODEBUG"), ",")
sort.Strings(dbg)
// dbgOpt returns true when the given debug option is enabled, for
// example SENTRYGODEBUG=someopt=1.
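	// Implementation note: sort.SearchStrings returns len(dbg) when the
	// option is absent; the modulo keeps the index in range and the
	// equality check then fails, so absent options read as disabled.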
dbgOpt := func(opt string) bool {
s := opt + "=1"
return dbg[sort.SearchStrings(dbg, s)%len(dbg)] == s
}
if dbgOpt("httpdump") || dbgOpt("httptrace") {
options.HTTPTransport = &debug.Transport{
RoundTripper: http.DefaultTransport,
Output: os.Stderr,
Dump: dbgOpt("httpdump"),
Trace: dbgOpt("httptrace"),
}
}
var dsn *Dsn
if options.Dsn != "" {
var err error
dsn, err = NewDsn(options.Dsn)
if err != nil {
return nil, err
}
}
client := Client{
options: options,
dsn: dsn,
}
client.setupTransport()
client.setupIntegrations()
return &client, nil
}
func (client *Client) setupTransport() {
opts := client.options
transport := opts.Transport
if transport == nil {
if opts.Dsn == "" {
transport = new(noopTransport)
} else {
httpTransport := NewHTTPTransport()
// When tracing is enabled, use larger buffer to
// accommodate more concurrent events.
// TODO(tracing): consider using separate buffers per
// event type.
if opts.TracesSampleRate != 0 || opts.TracesSampler != nil {
httpTransport.BufferSize = 1000
}
transport = httpTransport
}
}
transport.Configure(opts)
client.Transport = transport
}
func (client *Client) setupIntegrations() {
integrations := []Integration{
new(contextifyFramesIntegration),
new(environmentIntegration),
new(modulesIntegration),
new(ignoreErrorsIntegration),
}
if client.options.Integrations != nil {
integrations = client.options.Integrations(integrations)
}
for _, integration := range integrations {
if client.integrationAlreadyInstalled(integration.Name()) {
Logger.Printf("Integration %s is already installed\n", integration.Name())
continue
}
client.integrations = append(client.integrations, integration)
integration.SetupOnce(client)
Logger.Printf("Integration installed: %s\n", integration.Name())
}
}
// AddEventProcessor adds an event processor to the client. It must not be
// called from concurrent goroutines. Most users will prefer to use
// ClientOptions.BeforeSend or Scope.AddEventProcessor instead.
//
// Note that typical programs have only a single client created by Init and the
// client is shared among multiple hubs, one per goroutine, such that adding an
// event processor to the client affects all hubs that share the client.
func (client *Client) AddEventProcessor(processor EventProcessor) {
client.eventProcessors = append(client.eventProcessors, processor)
}
// Options return ClientOptions for the current Client.
func (client Client) Options() ClientOptions {
return client.options
}
// CaptureMessage captures an arbitrary message.
func (client *Client) CaptureMessage(message string, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromMessage(message, LevelInfo)
return client.CaptureEvent(event, hint, scope)
}
// CaptureException captures an error.
func (client *Client) CaptureException(exception error, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromException(exception, LevelError)
return client.CaptureEvent(event, hint, scope)
}
// CaptureEvent captures an event on the currently active client if any.
//
// The event must already be assembled. Typically code would instead use
// the utility methods like CaptureException. The return value is the
// event ID. If Sentry is disabled or the event was dropped, the return value will be nil.
func (client *Client) CaptureEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
return client.processEvent(event, hint, scope)
}
// Recover captures a panic.
// Returns the EventID if successful, or nil if there's no error to recover from.
func (client *Client) Recover(err interface{}, hint *EventHint, scope EventModifier) *EventID {
if err == nil {
err = recover()
}
// Normally we would not pass a nil Context, but RecoverWithContext doesn't
	// use the Context for communicating a deadline or cancelation. All it does
// is store the Context in the EventHint and there nil means the Context is
// not available.
//nolint: staticcheck
return client.RecoverWithContext(nil, err, hint, scope)
}
// RecoverWithContext captures a panic and passes relevant context object.
// Returns the EventID if successful, or nil if there's no error to recover from.
func (client *Client) RecoverWithContext(
ctx context.Context,
err interface{},
hint *EventHint,
scope EventModifier,
) *EventID {
if err == nil {
err = recover()
}
if err == nil {
return nil
}
if ctx != nil {
if hint == nil {
hint = &EventHint{}
}
if hint.Context == nil {
hint.Context = ctx
}
}
var event *Event
switch err := err.(type) {
case error:
event = client.eventFromException(err, LevelFatal)
case string:
event = client.eventFromMessage(err, LevelFatal)
default:
event = client.eventFromMessage(fmt.Sprintf("%#v", err), LevelFatal)
}
return client.CaptureEvent(event, hint, scope)
}
// Flush waits until the underlying Transport sends any buffered events to the
// Sentry server, blocking for at most the given timeout. It returns false if
// the timeout was reached. In that case, some events may not have been sent.
//
// Flush should be called before terminating the program to avoid
// unintentionally dropping events.
//
// Do not call Flush indiscriminately after every call to CaptureEvent,
// CaptureException or CaptureMessage. Instead, to have the SDK send events over
// the network synchronously, configure it to use the HTTPSyncTransport in the
// call to Init.
func (client *Client) Flush(timeout time.Duration) bool {
return client.Transport.Flush(timeout)
}
func (client *Client) eventFromMessage(message string, level Level) *Event {
if message == "" {
err := usageError{fmt.Errorf("%s called with empty message", callerFunctionName())}
return client.eventFromException(err, level)
}
event := NewEvent()
event.Level = level
event.Message = message
if client.Options().AttachStacktrace {
event.Threads = []Thread{{
Stacktrace: NewStacktrace(),
Crashed: false,
Current: true,
}}
}
return event
}
func (client *Client) eventFromException(exception error, level Level) *Event {
err := exception
if err == nil {
err = usageError{fmt.Errorf("%s called with nil error", callerFunctionName())}
}
event := NewEvent()
event.Level = level
for i := 0; i < maxErrorDepth && err != nil; i++ {
event.Exception = append(event.Exception, Exception{
Value: err.Error(),
Type: reflect.TypeOf(err).String(),
Stacktrace: ExtractStacktrace(err),
})
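		// Walk the chain of wrapped errors, supporting both Go 1.13-style
		// Unwrap and github.com/pkg/errors-style Cause.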
switch previous := err.(type) {
case interface{ Unwrap() error }:
err = previous.Unwrap()
case interface{ Cause() error }:
err = previous.Cause()
default:
err = nil
}
}
// Add a trace of the current stack to the most recent error in a chain if
// it doesn't have a stack trace yet.
// We only add to the most recent error to avoid duplication and because the
// current stack is most likely unrelated to errors deeper in the chain.
if event.Exception[0].Stacktrace == nil {
event.Exception[0].Stacktrace = NewStacktrace()
}
// event.Exception should be sorted such that the most recent error is last.
reverse(event.Exception)
return event
}
// reverse reverses the slice a in place.
func reverse(a []Exception) {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
}
func (client *Client) processEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
if event == nil {
err := usageError{fmt.Errorf("%s called with nil event", callerFunctionName())}
return client.CaptureException(err, hint, scope)
}
options := client.Options()
// The default error event sample rate for all SDKs is 1.0 (send all).
//
// In Go, the zero value (default) for float64 is 0.0, which means that
// constructing a client with NewClient(ClientOptions{}), or, equivalently,
// initializing the SDK with Init(ClientOptions{}) without an explicit
// SampleRate would drop all events.
//
// To retain the desired default behavior, we exceptionally flip SampleRate
// from 0.0 to 1.0 here. Setting the sample rate to 0.0 is not very useful
// anyway, and the same end result can be achieved in many other ways like
// not initializing the SDK, setting the DSN to the empty string or using an
// event processor that always returns nil.
//
// An alternative API could be such that default options don't need to be
// the same as Go's zero values, for example using the Functional Options
// pattern. That would either require a breaking change if we want to reuse
// the obvious NewClient name, or a new function as an alternative
// constructor.
if options.SampleRate == 0.0 {
options.SampleRate = 1.0
}
// Transactions are sampled by options.TracesSampleRate or
// options.TracesSampler when they are started. All other events
// (errors, messages) are sampled here.
if event.Type != transactionType && !sample(options.SampleRate) {
Logger.Println("Event dropped due to SampleRate hit.")
return nil
}
if event = client.prepareEvent(event, hint, scope); event == nil {
return nil
}
// As per spec, transactions do not go through BeforeSend.
if event.Type != transactionType && options.BeforeSend != nil {
if hint == nil {
hint = &EventHint{}
}
if event = options.BeforeSend(event, hint); event == nil {
Logger.Println("Event dropped due to BeforeSend callback.")
return nil
}
}
client.Transport.SendEvent(event)
return &event.EventID
}
func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventModifier) *Event {
if event.EventID == "" {
event.EventID = EventID(uuid())
}
if event.Timestamp.IsZero() {
event.Timestamp = time.Now()
}
if event.Level == "" {
event.Level = LevelInfo
}
if event.ServerName == "" {
if client.Options().ServerName != "" {
event.ServerName = client.Options().ServerName
} else {
event.ServerName = hostname
}
}
if event.Release == "" && client.Options().Release != "" {
event.Release = client.Options().Release
}
if event.Dist == "" && client.Options().Dist != "" {
event.Dist = client.Options().Dist
}
if event.Environment == "" && client.Options().Environment != "" {
event.Environment = client.Options().Environment
}
event.Platform = "go"
event.Sdk = SdkInfo{
Name: "sentry.go",
Version: Version,
Integrations: client.listIntegrations(),
Packages: []SdkPackage{{
Name: "sentry-go",
Version: Version,
}},
}
if scope != nil {
event = scope.ApplyToEvent(event, hint)
if event == nil {
return nil
}
}
for _, processor := range client.eventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
return nil
}
}
for _, processor := range globalEventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
return nil
}
}
return event
}
func (client Client) listIntegrations() []string {
integrations := make([]string, 0, len(client.integrations))
for _, integration := range client.integrations {
integrations = append(integrations, integration.Name())
}
sort.Strings(integrations)
return integrations
}
func (client Client) integrationAlreadyInstalled(name string) bool {
for _, integration := range client.integrations {
if integration.Name() == name {
return true
}
}
return false
}
// sample returns true with the given probability, which must be in the range
// [0.0, 1.0].
func sample(probability float64) bool {
return rng.Float64() < probability
}
|
[
"\"SENTRY_DSN\"",
"\"SENTRY_ENVIRONMENT\"",
"\"SENTRYGODEBUG\""
] |
[] |
[
"SENTRYGODEBUG",
"SENTRY_DSN",
"SENTRY_ENVIRONMENT"
] |
[]
|
["SENTRYGODEBUG", "SENTRY_DSN", "SENTRY_ENVIRONMENT"]
|
go
| 3 | 0 | |
pump_transfer.py
|
#!/usr/bin/env python
import copy
import logging
import optparse
import os
import random
import sqlite3
import string
import sys
import threading
from typing import Optional, Union, Tuple, List, Dict
import pump
import pump_bfd
import pump_csv
import pump_cb
import pump_gen
import pump_mc
import pump_dcp
from pump import PumpingStation
def exit_handler(err: Optional[str]):
if err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
else:
sys.exit(0)
class Transfer:
"""Base class for 2.0 Backup/Restore/Transfer."""
def __init__(self):
self.name = "cbtransfer"
self.source_alias = "source"
self.sink_alias = "destination"
self.usage = \
"%prog [options] source destination\n\n" \
"Transfer couchbase cluster data from source to destination.\n\n" \
"Examples:\n" \
" %prog http://SOURCE:8091 /backups/backup-42\n" \
" %prog /backups/backup-42 http://DEST:8091\n" \
" %prog /backups/backup-42 couchbase://DEST:8091\n" \
" %prog http://SOURCE:8091 http://DEST:8091\n" \
" %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ /backup-XXX\n" \
" %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ couchbase://DEST:8091\n"
def main(self, argv, opts_etc=None):
if threading.currentThread().getName() == "MainThread":
threading.currentThread().setName("mt")
err, opts, source, sink = self.opt_parse(argv)
if err:
return err
if opts_etc:
opts.etc = opts_etc # Used for unit tests, etc.
process_name = f'{os.path.basename(argv[0])}-{"".join(random.sample(string.ascii_letters, 16))}'
setattr(opts, "process_name", process_name)
logging.info(f'{self.name}...')
logging.info(f' source : {source}')
logging.info(f' sink : {sink}')
logging.info(f' opts : {opts.safe}')
source_class, sink_class = self.find_handlers(opts, source, sink)
if not source_class:
return f'error: unknown type of source: {source}'
if not sink_class:
return f'error: unknown type of sink: {sink}'
err = sink_class.check_source(opts, source_class, source, sink_class, sink)
if err:
return err
try:
pumpStation = pump.PumpingStation(opts, source_class, source,
sink_class, sink)
rv = pumpStation.run()
self.aggregate_stats(pumpStation.cur)
return rv
except KeyboardInterrupt:
return "interrupted."
def aggregate_stats(self, cur):
return 0
def check_opts(self, opts):
return None
def opt_parse(self, argv):
p = self.opt_parser()
opts, rest = p.parse_args(argv[1:])
if len(rest) != 2:
p.print_help()
return f'\nError: please provide both a {self.source_alias} and a {self.sink_alias}', None, None, None
err = self.check_opts(opts) # pylint: disable=assignment-from-none
if err:
return err, None, None, None
min_thread = 1
max_thread = 20
        if opts.threads not in range(min_thread, max_thread + 1):
return f'\nError: option -t: value is out of range [{min_thread}, {max_thread}]', None, None, None
if opts.username is None:
username = os.environ.get('CB_REST_USERNAME', None)
if username:
opts.username = username
else:
return "\nError: option -u/--username is required", None, None, None
if opts.password is None:
password = os.environ.get('CB_REST_PASSWORD', None)
if password:
opts.password = password
else:
return "\nError: option -p/--password is required", None, None, None
opts.extra = opt_parse_extra(opts.extra, self.opt_extra_defaults())
opts.safe = opt_parse_helper(opts)
return None, opts, rest[0], rest[1]
def opt_parser(self):
p = optparse.OptionParser(usage=self.usage)
opt_extra_help(p, self.opt_extra_defaults(False))
self.opt_parser_options(p)
return p
def opt_parser_options(self, p):
p.add_option("-b", "--bucket-source",
action="store", type="string", default=None,
help="""Single named bucket from source cluster to transfer""")
p.add_option("-B", "--bucket-destination",
action="store", type="string", default=None,
help="""Single named bucket on destination cluster which receives transfer.
This allows you to transfer to a bucket with a different name
as your source bucket. If you do not provide defaults to the
same name as the bucket-source""")
self.opt_parser_options_common(p)
p.add_option("", "--single-node",
action="store_true", default=False,
help="""Transfer from a single server node in a source cluster,
This single server node is a source node URL""")
p.add_option("", "--source-vbucket-state",
action="store", type="string", default='active',
help="""Only transfer from source vbuckets in this state,
such as 'active' (default) or 'replica'.
Must be used with Couchbase cluster as source""")
p.add_option("", "--destination-vbucket-state",
action="store", type="string", default='active',
help="""Only transfer to destination vbuckets in this state,
such as 'active' (default) or 'replica'.
                             Must be used with Couchbase cluster as destination""")
p.add_option("", "--destination-operation",
action="store", type="string", default=None,
help="""Perform this operation on transfer.
'set' will override an existing document,
'add' will not override, 'get' will load all keys transferred
from a source cluster into the caching layer at the destination""")
def opt_parser_options_common(self, p):
p.add_option("-i", "--id",
action="store", type="int", default=None,
help="""Transfer only items that match a vbucketID""")
p.add_option("-k", "--key",
action="store", type="string", default=None,
help="""Transfer only items with keys that match a regexp""")
p.add_option("", "--vbucket-list",
action="store", type="string", default=None,
help=optparse.SUPPRESS_HELP)
p.add_option("-n", "--dry-run",
action="store_true", default=False,
help="""No actual transfer; just validate parameters, files,
connectivity and configurations""")
p.add_option("-u", "--username",
action="store", type="string", default=None,
help="REST username for source cluster or server node")
p.add_option("-p", "--password",
action="store", type="string", default=None,
help="REST password for source cluster or server node")
p.add_option("-U", "--username-dest",
action="store", type="string", default=None,
help="REST username for destination cluster or server node")
p.add_option("-P", "--password-dest",
action="store", type="string", default=None,
help="REST password for destination cluster or server node")
p.add_option("-s", "--ssl",
action="store_true", default=False,
help="Transfer data with SSL enabled")
p.add_option("", "--no-ssl-verify", default=True, action="store_false",
help="Skips SSL verification of certificates against the CA")
p.add_option("", "--cacert", dest="cacert", default=None, action="store",
help="Verifies the cluster identity with this certificate")
p.add_option("-t", "--threads",
action="store", type="int", default=4,
help="""Number of concurrent workers threads performing the transfer""")
p.add_option("-v", "--verbose",
action="count", default=0,
help="verbose logging; more -v's provide more verbosity. Max is -vvv")
p.add_option("", "--silent", action="store_true", default=False,
help="""Reduce logging verbosity to only include errors""")
p.add_option("-x", "--extra",
action="store", type="string", default=None,
help="""Provide extra, uncommon config parameters;
comma-separated key=val(,key=val)* pairs""")
p.add_option("-c", "--collection",
help=optparse.SUPPRESS_HELP)
p.add_option("", "--force-txn", default=False, action="store_true", help=optparse.SUPPRESS_HELP)
def opt_extra_defaults(self, add_hidden=True):
rv = {
"batch_max_size": (1000, "Transfer this # of documents per batch"),
"batch_max_bytes": (400000, "Transfer this # of bytes per batch"),
"cbb_max_mb": (100000, "Split backup file on destination cluster if it exceeds MB"),
"max_retry": (10, "Max number of sequential retries if transfer fails"),
"report": (5, "Number batches transferred before updating progress bar in console"),
"report_full": (2000, "Number batches transferred before emitting progress information in console"),
"recv_min_bytes": (4096, "Amount of bytes for every TCP/IP call transferred"),
"try_xwm": (1, "Transfer documents with metadata. 0 should only be used if you transfer from 1.8.x to 1.8.x"),
"nmv_retry": (1, "0 or 1, where 1 retries transfer after a NOT_MY_VBUCKET message"),
"rehash": (0, "For value 1, rehash the partition id's of each item; \
this is needed when transferring data between clusters with different number of partitions, \
such as when transferring data from an OSX server to a non-OSX cluster"),
"data_only": (0, "For value 1, only transfer data from a backup file or cluster"),
"design_doc_only": (0, "For value 1, transfer design documents only from a backup file or cluster"),
"conflict_resolve":(1, "By default, enable conflict resolution."),
"seqno": (0, "By default, start seqno from beginning."),
"mcd_compatible": (1, "For value 0, display extended fields for stdout output."),
"uncompress": (0, "For value 1, restore data in uncompressed mode"),
"backoff_cap": (10, "Max backoff time during rebalance period"),
"flow_control": (1, "For value 0, disable flow control to improve throughput"),
"dcp_consumer_queue_length": (1000,"A DCP client needs a queue for incoming documents/messages. A large length is more efficient, but memory proportional to length*avg. doc size. Below length 150, performance degrades significantly."),
}
if add_hidden:
rv["allow_recovery_vb_remap"] = (0, "Allows the vbucket list to override the vbucket map from the server.")
return rv
def find_handlers(self, opts, source, sink):
return (PumpingStation.find_handler(opts, source, SOURCES),
PumpingStation.find_handler(opts, sink, SINKS))
class Backup(Transfer):
"""Entry point for 2.0 cbbackup."""
def __init__(self):
self.name = "cbbackup"
self.source_alias = "source"
self.sink_alias = "backup_dir"
if self._is_enterprise():
self.usage = \
"%prog [options] source backup_dir\n\n" \
"Online backup of a couchbase cluster or server node.\n\n" \
"Examples:\n" \
" The first backup to a given directory is a full backup, any subsequent ones are incremental.\n" \
" %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
" To take a differential backup after taking a full backup. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m diff\n\n" \
" To take an accumulative backup after taking a full backup. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m accu --single-node\n\n" \
"Note: A full backup task is always triggered for a new sink location\n" \
" no matter what backup mode is specified.\n"
else:
self.usage = \
"%prog [options] source backup_dir\n\n" \
"Online backup of a couchbase cluster or server node.\n\n" \
"Examples:\n" \
" Take a full backup of a cluster. \n" \
" %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
" Take a full backup for a single node. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 --single-node\n" \
def opt_parser_options(self, p):
p.add_option("-b", "--bucket-source",
action="store", type="string", default=None,
help="""single bucket from source to backup""")
p.add_option("", "--single-node",
action="store_true", default=False,
help="""use a single server node from the source only,
not all server nodes from the entire cluster;
this single server node is defined by the source URL""")
if self._is_enterprise():
p.add_option("-m", "--mode",
action="store", type="string", default="diff",
help="backup mode: full, diff or accu [default:%default]")
else:
p.add_option("-m", "--mode",
action="store", type="string", default="full",
help=optparse.SUPPRESS_HELP)
Transfer.opt_parser_options_common(self, p)
def find_handlers(self, opts, source, sink):
return PumpingStation.find_handler(opts, source, SOURCES), \
PumpingStation.find_handler(opts, sink, SINKS)
def check_opts(self, opts):
mode = getattr(opts, "mode", None)
if mode:
if mode not in ["full", "diff", "accu"]:
return "\nError: option mode has to be 'full', 'diff' or 'accu'"
return None
def _is_enterprise(self):
try:
import pump_bfd2
return True
except ImportError:
return False
class Restore(Transfer):
"""Entry point for 2.0 cbrestore."""
# TODO: (1) Restore - opt_parse handle 1.8 backwards compatible args.
def __init__(self):
self.name = "cbrestore"
self.source_alias = "backup_dir"
self.sink_alias = "destination"
self.usage = \
"%prog [options] backup_dir destination\n\n" \
"Restores a single couchbase bucket.\n\n" \
"Please first create the destination / bucket before restoring.\n\n" \
"Examples:\n" \
" %prog /backups/backup-42 http://HOST:8091 \\\n" \
" --bucket-source=default --from-date=2014-01-20 --to-date=2014-03-31\n" \
" %prog /backups/backup-42 couchbase://HOST:8091 \\\n" \
" --bucket-source=default\n" \
" %prog /backups/backup-42 memcached://HOST:11211 \\\n" \
" --bucket-source=sessions --bucket-destination=sessions2"
def opt_parser_options(self, p):
p.add_option("-a", "--add",
action="store_true", default=False,
help="""use add instead of set to not overwrite existing
items in the destination""")
p.add_option("-b", "--bucket-source",
action="store", type="string", default=None,
help="""single bucket from the backup_dir to restore;
if the backup_dir only contains a single bucket,
then that bucket will be automatically used""")
p.add_option("-B", "--bucket-destination",
action="store", type="string", default=None,
help="""when --bucket-source is specified, overrides the
destination bucket name; this allows you to restore
to a different bucket; defaults to the same as the
bucket-source""")
p.add_option("", "--from-date",
action="store", type="string", default=None,
help="""restore data from the date specified as yyyy-mm-dd. By default,
all data from the very beginning will be restored""")
p.add_option("", "--to-date",
action="store", type="string", default=None,
help="""restore data till the date specified as yyyy-mm-dd. By default,
all data that are collected will be restored""")
Transfer.opt_parser_options_common(self, p)
# TODO: (1) cbrestore parameter --create-design-docs=y|n
# TODO: (1) cbrestore parameter -d DATA, --data=DATA
# TODO: (1) cbrestore parameter --validate-only
# TODO: (1) cbrestore parameter -H HOST, --host=HOST
# TODO: (1) cbrestore parameter -p PORT, --port=PORT
# TODO: (1) cbrestore parameter option to override expiration?
def find_handlers(self, opts, source, sink):
return pump_bfd.BFDSource, PumpingStation.find_handler(opts, sink, SINKS)
# --------------------------------------------------
def opt_parse_helper(opts):
logging_level = logging.WARN
if opts.verbose >= 1:
logging_level = logging.INFO
if opts.verbose >= 2:
logging_level = logging.DEBUG
if opts.silent:
logging_level = logging.ERROR
logging.basicConfig(format=pump.LOGGING_FORMAT, level=logging_level)
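    # Return a deep copy of the options with credentials masked so the
    # options can be logged safely.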
opts_x = copy.deepcopy(opts)
if opts_x.username:
opts_x.username = "<xxx>"
if opts_x.password:
opts_x.password = "<xxx>"
return opts_x
def opt_parse_extra(extra, extra_defaults):
"""Convert an extra string (comma-separated key=val pairs) into
a dict, using default values from extra_defaults dict."""
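    # Example: extra="batch_max_size=500" with a default entry
    # {"batch_max_size": (1000, "...")} yields {"batch_max_size": 500.0};
    # note that every returned value is coerced to float.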
extra_in = dict([(x[0], x[1]) for x in
[(kv + '=').split('=') for kv in
(extra or "").split(',')]])
for k, v in extra_in.items():
if k and not extra_defaults.get(k):
sys.exit("error: unknown extra option: " + k)
return dict([(k, float(extra_in.get(k, extra_defaults[k][0])))
for k in extra_defaults.keys()])
def opt_extra_help(parser, extra_defaults):
extra_help = "; ".join([f'{k}={extra_defaults[k][0]} ({extra_defaults[k][1]})'
for k in sorted(extra_defaults.keys())])
group = optparse.OptionGroup(parser, "Available extra config parameters (-x)",
extra_help)
parser.add_option_group(group)
# --------------------------------------------------
SOURCES = [pump_bfd.BFDSource,
pump_csv.CSVSource,
pump_gen.GenSource,
pump_dcp.DCPStreamSource,
pump.StdInSource]
SINKS = [pump_bfd.BFDSink,
pump_mc.MCSink,
pump_cb.CBSink,
pump_csv.CSVSink,
pump.StdOutSink]
try:
import pump_sfd
SOURCES.append(pump_sfd.SFDSource)
SINKS.append(pump_sfd.SFDSink)
except ImportError:
pass
try:
import pump_json
SOURCES.append(pump_json.JSONSource)
except ImportError:
pass
try:
import pump_bfd2
SINKS.insert(0, pump_bfd2.BFDSinkEx)
except ImportError:
pass
# TODO: (1) pump_transfer - use QUIET commands
# TODO: (1) pump_transfer - verify that nth replica got the msg
# TODO: (1) pump_transfer - ability to TAP a non-active or replica vbucket / MB-4583
# TODO: (10) pump_transfer - incremental backup/restore
if __name__ == '__main__':
sys.exit(Transfer().main(sys.argv))
|
[] |
[] |
[
"CB_REST_PASSWORD",
"CB_REST_USERNAME"
] |
[]
|
["CB_REST_PASSWORD", "CB_REST_USERNAME"]
|
python
| 2 | 0 | |
opencti-worker/src/worker.py
|
# coding: utf-8
import logging
import functools
import yaml
import pika
import os
import time
import json
import base64
import threading
import ctypes
from requests.exceptions import RequestException
from pycti import OpenCTIApiClient
PROCESSING_COUNT = 5
class Consumer(threading.Thread):
def __init__(self, connector, opencti_url, opencti_token):
threading.Thread.__init__(self)
self.opencti_url = opencti_url
self.opencti_token = opencti_token
self.api = OpenCTIApiClient(self.opencti_url, self.opencti_token)
self.queue_name = connector["config"]["push"]
self.pika_connection = pika.BlockingConnection(
pika.URLParameters(connector["config"]["uri"])
)
self.channel = self.pika_connection.channel()
self.channel.basic_qos(prefetch_count=1)
self.processing_count = 0
def get_id(self):
if hasattr(self, "_thread_id"):
return self._thread_id
        for thread_id, thread in threading._active.items():
            if thread is self:
                return thread_id
def terminate(self):
thread_id = self.get_id()
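        # Asynchronously raise SystemExit in the target thread. A return
        # value > 1 means more than one thread state was modified, so the
        # request must be cancelled by calling again with exc set to 0 (NULL).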
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
thread_id, ctypes.py_object(SystemExit)
)
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
logging.info("Unable to kill the thread")
def nack_message(self, channel, delivery_tag):
if channel.is_open:
logging.info("Message (delivery_tag=" + str(delivery_tag) + ") rejected")
channel.basic_nack(delivery_tag)
else:
logging.info(
"Message (delivery_tag="
+ str(delivery_tag)
+ ") NOT rejected (channel closed)"
)
def ack_message(self, channel, delivery_tag):
if channel.is_open:
logging.info(
"Message (delivery_tag=" + str(delivery_tag) + ") acknowledged"
)
channel.basic_ack(delivery_tag)
else:
logging.info(
"Message (delivery_tag="
+ str(delivery_tag)
+ ") NOT acknowledged (channel closed)"
)
def stop_consume(self, channel):
if channel.is_open:
channel.stop_consuming()
# Callable for consuming a message
def _process_message(self, channel, method, properties, body):
data = json.loads(body)
logging.info(
"Processing a new message (delivery_tag="
+ str(method.delivery_tag)
+ "), launching a thread..."
)
thread = threading.Thread(
target=self.data_handler,
args=[self.pika_connection, channel, method.delivery_tag, data],
)
thread.start()
while thread.is_alive(): # Loop while the thread is processing
self.pika_connection.sleep(0.05)
logging.info("Message processed, thread terminated")
# Data handling
def data_handler(self, connection, channel, delivery_tag, data):
# Set the API headers
applicant_id = data["applicant_id"]
self.api.set_applicant_id_header(applicant_id)
work_id = data["work_id"] if "work_id" in data else None
# Execute the import
self.processing_count += 1
content = "Unparseable"
try:
content = base64.b64decode(data["content"]).decode("utf-8")
types = (
data["entities_types"]
if "entities_types" in data and len(data["entities_types"]) > 0
else None
)
update = data["update"] if "update" in data else False
processing_count = self.processing_count
if self.processing_count == PROCESSING_COUNT:
processing_count = None
self.api.stix2.import_bundle_from_json(
content, update, types, processing_count
)
# Ack the message
cb = functools.partial(self.ack_message, channel, delivery_tag)
connection.add_callback_threadsafe(cb)
if work_id is not None:
self.api.work.report_expectation(work_id, None)
self.processing_count = 0
return True
except RequestException as re:
logging.error("A connection error occurred: { " + str(re) + " }")
time.sleep(60)
logging.info(
"Message (delivery_tag=" + str(delivery_tag) + ") NOT acknowledged"
)
cb = functools.partial(self.nack_message, channel, delivery_tag)
connection.add_callback_threadsafe(cb)
self.processing_count = 0
return False
except Exception as ex:
error = str(ex)
if (
"UnsupportedError" not in error
and self.processing_count < PROCESSING_COUNT
):
time.sleep(1)
logging.info(
"Message (delivery_tag="
+ str(delivery_tag)
+ ") reprocess (retry nb: "
+ str(self.processing_count)
+ ")"
)
self.data_handler(connection, channel, delivery_tag, data)
else:
logging.error(str(ex))
self.processing_count = 0
cb = functools.partial(self.ack_message, channel, delivery_tag)
connection.add_callback_threadsafe(cb)
if work_id is not None:
self.api.work.report_expectation(
work_id, {"error": str(ex), "source": content}
)
return False
def run(self):
try:
# Consume the queue
logging.info("Thread for queue " + self.queue_name + " started")
self.channel.basic_consume(
queue=self.queue_name, on_message_callback=self._process_message
)
self.channel.start_consuming()
finally:
self.channel.stop_consuming()
logging.info("Thread for queue " + self.queue_name + " terminated")
class Worker:
def __init__(self):
self.logs_all_queue = "logs_all"
self.consumer_threads = {}
self.logger_threads = {}
# Get configuration
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.log_level = os.getenv("WORKER_LOG_LEVEL") or config["worker"]["log_level"]
self.opencti_url = os.getenv("OPENCTI_URL") or config["opencti"]["url"]
self.opencti_token = os.getenv("OPENCTI_TOKEN") or config["opencti"]["token"]
# Check if openCTI is available
self.api = OpenCTIApiClient(
self.opencti_url, self.opencti_token, self.log_level
)
# Configure logger
numeric_level = getattr(logging, self.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: " + self.log_level)
logging.basicConfig(level=numeric_level)
# Initialize variables
self.connectors = []
self.queues = []
# Start the main loop
def start(self):
while True:
try:
# Fetch queue configuration from API
self.connectors = self.api.connector.list()
self.queues = list(map(lambda x: x["config"]["push"], self.connectors))
# Check if all queues are consumed
for connector in self.connectors:
queue = connector["config"]["push"]
if queue in self.consumer_threads:
if not self.consumer_threads[queue].is_alive():
logging.info(
"Thread for queue "
+ queue
+ " not alive, creating a new one..."
)
self.consumer_threads[queue] = Consumer(
connector,
self.opencti_url,
self.opencti_token,
)
self.consumer_threads[queue].start()
else:
self.consumer_threads[queue] = Consumer(
connector,
self.opencti_url,
self.opencti_token,
)
self.consumer_threads[queue].start()
# Check if some threads must be stopped
for thread in list(self.consumer_threads):
if thread not in self.queues:
logging.info(
"Queue " + thread + " no longer exists, killing thread..."
)
try:
self.consumer_threads[thread].terminate()
self.consumer_threads.pop(thread, None)
except Exception:
logging.info(
"Unable to kill the thread for queue "
+ thread
+ ", an operation is running, keep trying..."
)
time.sleep(60)
except KeyboardInterrupt:
# Graceful stop
for thread in self.consumer_threads.keys():
if thread not in self.queues:
self.consumer_threads[thread].terminate()
sys.exit(0)
except Exception as e:
logging.error(e)
time.sleep(60)
if __name__ == "__main__":
worker = Worker()
try:
worker.start()
except Exception as e:
logging.error(e)
sys.exit(1)
|
[] |
[] |
[
"OPENCTI_TOKEN",
"WORKER_LOG_LEVEL",
"OPENCTI_URL"
] |
[]
|
["OPENCTI_TOKEN", "WORKER_LOG_LEVEL", "OPENCTI_URL"]
|
python
| 3 | 0 | |
runtests.py
|
"""
.. module:: runtests
:synopsis: Enable python setup.py test to work
"""
# flake8: noqa
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_core_utils.tests.settings'
test_dir = os.path.dirname(__file__)
sys.path.insert(0, test_dir)
def runtests():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
if hasattr(django, 'setup'):
django.setup()
failures = test_runner.run_tests([test_dir])
sys.exit(bool(failures))
if __name__ == '__main__':
runtests()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
indico/modules/oauth/__init__.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import os
from datetime import timedelta
from uuid import uuid4
from flask import session
from flask_oauthlib.provider import OAuth2Provider
from indico.core import signals
from indico.core.logger import Logger
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
class IndicoOAuth2Provider(OAuth2Provider):
def init_app(self, app):
app.config.setdefault('OAUTH2_PROVIDER_ERROR_ENDPOINT', 'oauth.oauth_errors')
app.config.setdefault('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN', int(timedelta(days=3650).total_seconds()))
app.config.setdefault('OAUTH2_PROVIDER_TOKEN_GENERATOR', lambda req: unicode(uuid4()))
super(IndicoOAuth2Provider, self).init_app(app)
oauth = IndicoOAuth2Provider()
logger = Logger.get('oauth')
@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
if session.user.is_admin:
return SideMenuItem('applications', _('Applications'), url_for('oauth.apps'), section='integration')
@signals.menu.items.connect_via('user-profile-sidemenu')
def _extend_profile_sidemenu(sender, user, **kwargs):
yield SideMenuItem('applications', _('Applications'), url_for('oauth.user_profile'), 40, disabled=user.is_system)
@signals.app_created.connect
def _no_ssl_required_on_debug(app, **kwargs):
if app.debug:
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
@signals.users.merged.connect
def _delete_merged_user_tokens(target, source, **kwargs):
source.oauth_tokens.delete()
logger.info("All tokens for the user %s were deleted.", source)
|
[] |
[] |
[
"OAUTHLIB_INSECURE_TRANSPORT"
] |
[]
|
["OAUTHLIB_INSECURE_TRANSPORT"]
|
python
| 1 | 0 | |
guides/request-validation-python/example-3/example-3.6.x.py
|
from flask import abort, current_app, request
from functools import wraps
from twilio.request_validator import RequestValidator
import os
def validate_twilio_request(f):
"""Validates that incoming requests genuinely originated from Twilio"""
@wraps(f)
def decorated_function(*args, **kwargs):
# Create an instance of the RequestValidator class
validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))
# Validate the request using its URL, POST data,
# and X-TWILIO-SIGNATURE header
request_valid = validator.validate(
request.url,
request.form,
request.headers.get('X-TWILIO-SIGNATURE', ''))
# Continue processing the request if it's valid (or if DEBUG is True)
# and return a 403 error if it's not
if request_valid or current_app.debug:
return f(*args, **kwargs)
else:
return abort(403)
return decorated_function
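# A minimal usage sketch (assumes a Flask app object and a hypothetical
# /sms webhook route; not part of the original guide snippet):
#
#   from flask import Flask
#   app = Flask(__name__)
#
#   @app.route('/sms', methods=['POST'])
#   @validate_twilio_request
#   def incoming_sms():
#       return '<Response><Message>Hello!</Message></Response>'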
|
[] |
[] |
[
"TWILIO_AUTH_TOKEN"
] |
[]
|
["TWILIO_AUTH_TOKEN"]
|
python
| 1 | 0 | |
pkg/operator/starter.go
|
package operator
import (
"fmt"
"os"
"time"
"github.com/openshift/cluster-etcd-operator/pkg/operator/installercontroller"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/openshift/library-go/pkg/operator/staticpod"
"github.com/openshift/library-go/pkg/operator/status"
"github.com/openshift/library-go/pkg/operator/v1helpers"
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned"
operatorv1client "github.com/openshift/client-go/operator/clientset/versioned"
operatorv1informers "github.com/openshift/client-go/operator/informers/externalversions"
"github.com/openshift/cluster-etcd-operator/pkg/operator/configobservation/configobservercontroller"
"github.com/openshift/cluster-etcd-operator/pkg/operator/operatorclient"
"github.com/openshift/cluster-etcd-operator/pkg/operator/resourcesynccontroller"
"github.com/openshift/cluster-etcd-operator/pkg/operator/targetconfigcontroller"
"github.com/openshift/cluster-etcd-operator/pkg/operator/v311_00_assets"
"github.com/openshift/library-go/pkg/operator/staticpod/controller/revision"
)
func RunOperator(ctx *controllercmd.ControllerContext) error {
kubeClient, err := kubernetes.NewForConfig(ctx.KubeConfig)
if err != nil {
return err
}
operatorConfigClient, err := operatorv1client.NewForConfig(ctx.KubeConfig)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(ctx.KubeConfig)
if err != nil {
return err
}
configClient, err := configv1client.NewForConfig(ctx.KubeConfig)
if err != nil {
return err
}
operatorConfigInformers := operatorv1informers.NewSharedInformerFactory(operatorConfigClient, 10*time.Minute)
kubeInformersForNamespaces := v1helpers.NewKubeInformersForNamespaces(kubeClient,
"",
operatorclient.GlobalUserSpecifiedConfigNamespace,
operatorclient.GlobalMachineSpecifiedConfigNamespace,
operatorclient.OperatorNamespace,
operatorclient.TargetNamespace,
"kube-system",
)
operatorClient := &operatorclient.OperatorClient{
Informers: operatorConfigInformers,
Client: operatorConfigClient.OperatorV1(),
}
v1helpers.EnsureOperatorConfigExists(
dynamicClient,
v311_00_assets.MustAsset("v3.11.0/etcd/operator-config.yaml"),
schema.GroupVersionResource{Group: operatorv1.GroupName, Version: operatorv1.GroupVersion.Version, Resource: "etcds"},
)
resourceSyncController, err := resourcesynccontroller.NewResourceSyncController(
operatorClient,
kubeInformersForNamespaces,
kubeClient,
ctx.EventRecorder,
)
if err != nil {
return err
}
configObserver := configobservercontroller.NewConfigObserver(
operatorClient,
operatorConfigInformers,
kubeInformersForNamespaces.InformersFor("kube-system"),
resourceSyncController,
ctx.EventRecorder,
)
targetConfigController := targetconfigcontroller.NewTargetConfigController(
os.Getenv("IMAGE"),
kubeInformersForNamespaces,
operatorConfigInformers.Operator().V1().Etcds(),
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace),
operatorConfigClient.OperatorV1(),
operatorClient,
kubeClient,
ctx.EventRecorder,
)
staticPodControllers := staticpod.NewControllers(
operatorclient.TargetNamespace,
"openshift-etcd",
"etcd-pod",
[]string{"cluster-etcd-operator", "installer"},
[]string{"cluster-etcd-operator", "prune"},
revisionConfigMaps,
revisionSecrets,
operatorClient,
v1helpers.CachedConfigMapGetter(kubeClient, kubeInformersForNamespaces),
v1helpers.CachedSecretGetter(kubeClient, kubeInformersForNamespaces),
kubeClient.CoreV1(),
kubeClient,
dynamicClient,
kubeInformersForNamespaces.InformersFor(operatorclient.TargetNamespace),
kubeInformersForNamespaces.InformersFor(""),
ctx.EventRecorder,
).WithInstallerPodMutationFn(installercontroller.MutateInstallerPod)
clusterOperatorStatus := status.NewClusterOperatorStatusController(
"etcd",
[]configv1.ObjectReference{
{Group: "operator.openshift.io", Resource: "etcds", Name: "cluster"},
{Resource: "namespaces", Name: "openshift-config"},
{Resource: "namespaces", Name: "openshift-config-managed"},
{Resource: "namespaces", Name: operatorclient.TargetNamespace},
{Resource: "namespaces", Name: "openshift-etcd-operator"},
},
configClient.ConfigV1(),
operatorClient,
status.NewVersionGetter(),
ctx.EventRecorder,
)
operatorConfigInformers.Start(ctx.Context.Done())
kubeInformersForNamespaces.Start(ctx.Context.Done())
go staticPodControllers.Run(ctx.Context.Done())
go targetConfigController.Run(1, ctx.Context.Done())
go configObserver.Run(1, ctx.Context.Done())
go clusterOperatorStatus.Run(1, ctx.Context.Done())
go resourceSyncController.Run(1, ctx.Context.Done())
<-ctx.Context.Done()
return fmt.Errorf("stopped")
}
// revisionConfigMaps is a list of configmaps that are directly copied for the current values. A different actor/controller modifies these.
// the first element should be the configmap that contains the static pod manifest
var revisionConfigMaps = []revision.RevisionResource{
{Name: "etcd-pod"},
{Name: "config"},
}
// revisionSecrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these.
var revisionSecrets = []revision.RevisionResource{}
|
[
"\"IMAGE\""
] |
[] |
[
"IMAGE"
] |
[]
|
["IMAGE"]
|
go
| 1 | 0 | |
consumer/envs.go
|
package consumer
import (
"os"
)
var redisConnectionURL = os.Getenv("REDIS_CONNECTION_URL")
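// A minimal sketch of how a caller might guard against the variable being
// unset (hypothetical helper; this package itself only reads the value):
//
//	func mustRedisURL() string {
//		if redisConnectionURL == "" {
//			log.Fatal("REDIS_CONNECTION_URL not set")
//		}
//		return redisConnectionURL
//	}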
|
[
"\"REDIS_CONNECTION_URL\""
] |
[] |
[
"REDIS_CONNECTION_URL"
] |
[]
|
["REDIS_CONNECTION_URL"]
|
go
| 1 | 0 | |
testbench/controller_test.py
|
import os
import os.path as path
import pytest
import cocotb as ctb
import cocotb_test.simulator as tester
class TB(object):
pass
class MicroBlaze():
pass
@ctb.test()
def controller_test(dut):
pass
tests_dir = path.dirname(__file__)
hdl_dir = path.abspath(path.join(tests_dir, '..', 'hdl'))
@pytest.mark.skipif(os.getenv("SIM") == "ghdl", reason="Verilog not supported")
def test_controller():
dut = "ctrl_top"
module = path.splitext(path.basename(__file__))[0]
parameters = {}
parameters['DATA_ADDR_WIDTH'] = 5
verilog_sources = [
path.join(hdl_dir, f"controller/{dut}.v")
]
compile_args = [
"-g2005",
"-Wall"
]
includes = [
f"{hdl_dir}/src/",
f"{hdl_dir}/include/",
]
tester.run(
verilog_sources = verilog_sources,
toplevel = dut,
module = module,
parameters = parameters,
includes = includes,
compile_args = compile_args
)
# -*- coding: utf-8 -*-
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Timer
from cocotb.regression import TestFactory
@cocotb.test()
async def run_test(dut):
PERIOD = 10
cocotb.fork(Clock(dut.clk, PERIOD, 'ns').start(start_high=False))
dut.rst = 0
dut.en = 0
dut.prog = 0
dut.iw_valid = 0
dut.load_coef_addr = 0
dut.instr_word = 0
await Timer(20*PERIOD, units='ns')
dut.rst = 1
dut.en = 1
dut.prog = 1
dut.iw_valid = 1
dut.load_coef_addr = 1
dut.instr_word = 1
dut.ptr_req = 1
dut.en_calc = 1
dut.mac_init = 1
dut.en_ram_pa = 1
dut.en_ram_pb = 1
dut.wr_ram_pa = 1
dut.wr_ram_pb = 1
dut.regf_rd = 1
dut.regf_wr = 1
dut.regf_en = 1
dut.new_in = 1
dut.new_out = 1
dut.data_addr = 1
dut.coef_addr = 1
dut.ars1 = 1
dut.ars2 = 1
dut.ard1 = 1
dut.ard2 = 1
await Timer(20*PERIOD, units='ns')
# Register the test.
factory = TestFactory(run_test)
factory.generate_tests()
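# Illustrative sketch (hypothetical, not part of this bench): TestFactory can
# also fan out one generated test per option combination, provided the test
# coroutine accepts the option as a keyword argument.
#
#   async def run_test_param(dut, period=10):
#       ...
#   factory = TestFactory(run_test_param)
#   factory.add_option('period', [10, 20])
#   factory.generate_tests()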
|
[] |
[] |
[
"SIM"
] |
[]
|
["SIM"]
|
python
| 1 | 0 | |
pymbta3/pymbta3.py
|
import os
from functools import wraps
import inspect
from typing import Union
import requests
class PyMBTA3(object):
"""
Base class where the decorators and base function for the other classes of this python wrapper will inherit from.
"""
_MBTA_V3_API_URL = 'https://api-v3.mbta.com'
def __init__(self, key: str = None):
""" Initialize the class
Keyword Arguments:
key: MBTA v3 api key
"""
if key is None:
key = os.getenv('MBTA_API_KEY')
if not key or not isinstance(key, str):
raise ValueError('The MBTA-V3 API key must be provided either through the key parameter or '
'through the environment variable MBTA_API_KEY. Get a free key '
'from the MBTA website: https://api-v3.mbta.com/')
self.key = key
self.headers = {"X-API-Key": self.key, "accept": 'application/vnd.api+json'}
@classmethod
def _call_api_on_func(cls, func):
"""
Decorator for forming the api call with the arguments of the function, it works by taking the arguments
given to the function and building the url to call the api on it
Keyword Arguments:
func: The function to be decorated
"""
# Argument Handling
argspec = inspect.getfullargspec(func)
try:
# Assume most of the cases have a mixed between args and named args
positional_count = len(argspec.args) - len(argspec.defaults)
defaults = dict(zip(argspec.args[positional_count:], argspec.defaults))
except TypeError:
if argspec.args:
# No defaults
positional_count = len(argspec.args)
defaults = {}
elif argspec.defaults:
# Only defaults
positional_count = 0
defaults = argspec.defaults
# Actual decorating
@wraps(func)
def _call_wrapper(self, *args, **kwargs):
used_kwargs = kwargs.copy()
# Get the used positional arguments given to the function
used_kwargs.update(zip(argspec.args[positional_count:], args[positional_count:]))
# Update the dictionary to include the default parameters from the function
used_kwargs.update({k: used_kwargs.get(k, d) for k, d in defaults.items()})
# Form the base url, the original function called must return the function name defined in the MBTA api
function_name = func(self, *args, **kwargs)
url = f'{PyMBTA3._MBTA_V3_API_URL}/{function_name}'
for idx, arg_name in enumerate(argspec.args[1:]):
try:
arg_value = args[idx]
except IndexError:
arg_value = used_kwargs[arg_name]
if arg_value:
if arg_name == 'include':
if isinstance(arg_value, tuple) or isinstance(arg_value, list):
# If the argument is given as a list or tuple, join it into a comma-separated string
arg_value = ','.join(arg_value)
url = '{}include={}'.format(url, arg_value)
else:
# Discard argument in the url formation if it was set to None (in other words, this will call
# the api with its internal defined parameter)
if isinstance(arg_value, tuple) or isinstance(arg_value, list):
# If the argument is given as a list or tuple, join it into a comma-separated string
arg_value = ','.join(arg_value)
url = '{}&filter[{}]={}'.format(url, arg_name, arg_value)
return self._handle_api_call(url)
return _call_wrapper
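# Illustrative sketch of what a decorated call builds (hypothetical key and
# filter values; the URL shape follows the string formatting above):
#
#   alerts = Alerts(key='YOUR_MBTA_KEY')
#   alerts.get(route='Red', severity=['7', '8'])
#   # requests: https://api-v3.mbta.com/alerts?&filter[route]=Red&filter[severity]=7,8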
def _handle_api_call(self, url):
"""
Handle the return call from the api and return a data and meta_data object. It raises a ValueError on problems
url: The url of the service
"""
response = requests.get(url, headers=self.headers)
json_response = response.json()
if not json_response:
raise ValueError('Error getting data from the api, no return was given.')
return json_response
class Alerts(PyMBTA3):
@PyMBTA3._call_api_on_func
def get(self,
include: Union[str, list, tuple] = None,
activity: Union[str, list, tuple] = None,
route_type: Union[str, list, tuple] = None,
direction_id: Union[str, list, tuple] = None,
route: Union[str, list, tuple] = None,
stop: Union[str, list, tuple] = None,
trip: Union[str, list, tuple] = None,
facility: Union[str, list, tuple] = None,
id: Union[str, list, tuple] = None,
banner: bool = None,
lifecycle: Union[str, list, tuple] = None,
severity: Union[str, list, tuple] = None,
datetime: str = None,):
"""
List active and upcoming system alerts
https://api-v3.mbta.com/docs/swagger/index.html#/Alert/ApiWeb_AlertController_index
Keyword Arguments:
:param include: Relationships to include. [stops, routes, trips, facilities]
Includes data from related objects in the "included" keyword
:param activity: An activity affected by an alert. ["BOARD", "USING_ESCALATOR", "PARK_CAR"... ETC]
:param route_type: Filter by route_type: https://developers.google.com/transit/gtfs/reference/routes-file.
:param direction_id: Filter by direction of travel along the route.
:param route: Filter by /data/{index}/relationships/route/data/id.
:param stop: Filter by /data/{index}/relationships/stop/data/id
:param trip: Filter by /data/{index}/relationships/trip/data/id.
:param facility: Filter by /data/{index}/relationships/facility/data/id.
:param id: Filter by multiple IDs.
:param banner: When combined with other filters, filters by alerts with or without a banner.
:param lifecycle: Filters by an alert’s lifecycle.
:param severity: Filters alerts by list of severities.
:param datetime: Filter to alerts that are active at a given time
Additionally, the string “NOW” can be used to filter to alerts that are currently active.
"""
_CALL_KEY = "alerts?"
return _CALL_KEY
class Routes(PyMBTA3):
@PyMBTA3._call_api_on_func
def get(self,
include: Union[str, list, tuple] = None,
type: Union[str, list, tuple] = None,
direction_id: Union[str, list, tuple] = None,
route: Union[str, list, tuple] = None,
stop: Union[str, list, tuple] = None,
trip: Union[str, list, tuple] = None,
id: Union[str, list, tuple] = None,
date: str = None):
"""
List all routes
https://api-v3.mbta.com/docs/swagger/index.html#/Route/ApiWeb_RouteController_index
Keyword Arguments:
:param include: Relationships to include. [stops, line, route_patterns]
Includes data from related objects in the "included" keyword
:param type: Filter by route_type: https://developers.google.com/transit/gtfs/reference/routes-file.
:param direction_id: Filter by direction of travel along the route.
:param route: Filter by /data/{index}/relationships/route/data/id.
:param stop: Filter by /data/{index}/relationships/stop/data/id
:param trip: Filter by /data/{index}/relationships/trip/data/id.
:param id: Filter by multiple IDs.
:param date: Filter by date that route is active. The active date is the service date. YYYY-MM-DD
"""
_CALL_KEY = "routes?"
return _CALL_KEY
class Vehicles(PyMBTA3):
@PyMBTA3._call_api_on_func
def get(self,
include: Union[str, list, tuple] = None,
route_type: Union[str, list, tuple] = None,
direction_id: Union[str, list, tuple] = None,
route: Union[str, list, tuple] = None,
label: Union[str, list, tuple] = None,
trip: Union[str, list, tuple] = None,
id: Union[str, list, tuple] = None):
"""
List of vehicles (buses, ferries, and trains)
https://api-v3.mbta.com/docs/swagger/index.html#/Vehicle/ApiWeb_VehicleController_index
Keyword Arguments:
:param include: Relationships to include. [trip, stop, route]
Includes data from related objects in the "included" keyword
:param route_type: Filter by route_type: https://developers.google.com/transit/gtfs/reference/routes-file.
:param direction_id: Filter by direction of travel along the route.
:param route: Filter by /data/{index}/relationships/route/data/id.
:param label: Filter by label.
:param trip: Filter by /data/{index}/relationships/trip/data/id.
:param id: Filter by multiple IDs.
"""
_CALL_KEY = "vehicles?"
return _CALL_KEY
class Stops(PyMBTA3):
@PyMBTA3._call_api_on_func
def get(self,
include: Union[str, list, tuple] = None,
date: Union[str, list, tuple] = None,
direction_id: Union[str, list, tuple] = None,
latitude: Union[str, list, tuple] = None,
longitude: Union[str, list, tuple] = None,
radius: Union[str, list, tuple] = None,
id: Union[str, list, tuple] = None,
route_type: Union[str, list, tuple] = None,
route: Union[str, list, tuple] = None,
service: Union[str, list, tuple] = None,
location_type: Union[str, list, tuple] = None):
"""
List stops
https://api-v3.mbta.com/docs/swagger/index.html#/Stop/ApiWeb_StopController_index
Keyword Arguments:
:param include: Relationships to include. [parent_station, child_stops, recommended_transfers, facilities, route]
Includes data from related objects in the "included" keyword
:param date: Filter by date.
:param direction_id: Filter by direction of travel along the route.
:param latitude: Latitude in degrees North
:param longitude: Longitude in degrees East
:param radius: distance in degrees
:param id: Filter by multiple IDs.
:param route_type: Filter by route_type: https://developers.google.com/transit/gtfs/reference/routes-file.
:param route: Filter by /data/{index}/relationships/route/data/id.
:param service: Filter by service id.
:param location_type: Filter by location type.
"""
_CALL_KEY = "stops?"
return _CALL_KEY
|
[] |
[] |
[
"MBTA_API_KEY"
] |
[]
|
["MBTA_API_KEY"]
|
python
| 1 | 0 | |
vaccination_app/views.py
|
import os
from django.contrib import messages
from django import forms
from django.db.models.base import Model
from django.db.models.query import QuerySet
from django.conf import settings
from django.forms.forms import Form
from typing import List
from django.shortcuts import redirect, render
from django.urls.base import reverse
from django.utils.translation import templatize
from django.views.generic import TemplateView, ListView, FormView
from .registrationform import PostForm
from .vaccinecardform import PostForm2
from .otpform import PostForm4
from .models import *
from django.http import HttpResponse, request
from django.http import HttpResponseRedirect
from django.core.exceptions import ValidationError
from django.db import connection
from datetime import date, datetime
from .Twilio import sendsms
from .OTPGenerator import gen_key,generate_code,verify_code
from django.template.loader import get_template
from xhtml2pdf import pisa
from importlib import import_module
from django.contrib.sessions.backends.db import SessionStore
account_sid = os.environ['account_sid']
auth_token = os.environ['auth_token']
def HomePageView(request):
return render(request, 'home.html')
def CovidCheckView(request):
return render(request, 'covidcheck.html')
def FaqView(request):
return render(request, 'faq.html')
class AddressView(ListView):
http_method_names = ["get"]
model = Center
template_name = "centeraddress.html"
context_object_name = "centers"
queryset = Center.objects.all().order_by('center_address')
def VaccinecardView(request):
form_class = PostForm2
# if request is not post, initialize an empty form
form = form_class(request.POST or None)
if request.method == 'POST':
if form.is_valid():
search_term=form.cleaned_data['NID']
search_term2=form.cleaned_data['Date_of_Birth']
valid = Nid.objects.filter(id=search_term)
valid2 = Registration.objects.filter(nid=search_term).exists()
if valid2:
for objects in valid:
if objects.dob == search_term2:
human = True
search_term3 = Registration.objects.get(nid=search_term).mobile_no
request.session['NID'] = search_term
key = gen_key()
code = generate_code(key)
otp_obj = Otp.objects.create(
otpkey = code
)
msg_body =f'''
Covid-19 vaccine registration: Your OTP code:{code}
'''
sendsms(account_sid,auth_token,msg_body,'+19287560208','+880'+str(search_term3))
return redirect('/votp')
else:
form.add_error('Date_of_Birth', 'Your date of birth is incorrect')
else:
form.add_error('NID', 'You are not registered')
form = PostForm2()
context = {
'form': form
}
return render(request, 'vaccinecard.html', context)
# class VaccinecardView(FormView):
# template_name = "vaccinecard.html"
# form_class = PostForm2
# success_url = 'showInfo/'
# def form_valid(self, form):
# search_term=form.cleaned_data['NID']
# search_term2=form.cleaned_data['Date_of_Birth']
# valid = Nid.objects.filter(id=search_term)
# valid2 = Registration.objects.filter(nid=search_term).exists()
# if valid2:
# for objects in valid:
# if objects.dob == search_term2:
# human = True
# request.session['NID'] = 'search_term'
# return super().form_valid(form)
# else:
# form.add_error('Date_of_Birth', 'Your date of birth is incorrect')
# return self.form_invalid(form)
# else:
# form.add_error('NID', 'You are not registered')
# return self.form_invalid(form)
class RegistrationView(FormView):
template_name = "registration.html"
form_class = PostForm
success_url='/otp'
def form_valid(self, form):
search_term=form.cleaned_data['NID']
search_term2=form.cleaned_data['Date_of_Birth']
search_term3=form.cleaned_data['Phone_number']
search_term4=form.cleaned_data['Center']
search_term5 = form.cleaned_data['Category']
if search_term4 == '1':
form.add_error('Center', 'Please choose a center')
return self.form_invalid(form)
if search_term5 == '1':
form.add_error('Category', 'Please choose a Category')
return self.form_invalid(form)
today = date.today()
user_age= today.year-search_term2.year
valid = Nid.objects.filter(id=search_term)
valid2 = Registration.objects.filter(nid= search_term).exists()
valid3 =Registration.objects.filter(mobile_no=search_term3).exists()
valid4 = Nid.objects.filter(id=search_term).exists()
if valid2:
form.add_error('NID', 'You are already registered')
return self.form_invalid(form)
else:
if valid3:
form.add_error('Phone_number', 'This mobile number already registered')
return self.form_invalid(form)
else:
for objects in valid:
if valid4 and objects.dob == search_term2:
nid_obj = Nid.objects.get(id=form.cleaned_data['NID'])
center_obj = Center.objects.get(center_id=form.cleaned_data['Center'])
human = True
new_object = Registration.objects.create(
nid=nid_obj,
date = date.today(),
center=center_obj,
mobile_no=form.cleaned_data['Phone_number'],
age = user_age
)
key = gen_key()
code = generate_code(key)
otp_obj = Otp.objects.create(
otpkey = code
)
msg_body =f'''
Covid-19 vaccine registration: Your OTP code:{code}
'''
sendsms(account_sid,auth_token,msg_body,'+19287560208','+880'+search_term3)
return super().form_valid(form)
else:
form.add_error('NID', 'You are not eligible')
return self.form_invalid(form)
class OtpView(FormView):
template_name = "otp.html"
form_class=PostForm4
success_url='/'
def form_valid(self, form):
search_term5 = form.cleaned_data['OTP']
search_term6 = Otp.objects.filter(otpkey=search_term5).exists()
print(search_term6)
if search_term6:
messages.add_message(self.request, messages.SUCCESS, "You successfully registered")
return super().form_valid(form)
else:
form.add_error('OTP', 'Wrong OTP')
return self.form_invalid(form)
class VacOtpView(FormView):
template_name = "votp.html"
form_class=PostForm4
success_url='/showInfo'
def form_valid(self, form):
search_term5 = form.cleaned_data['OTP']
search_term6 = Otp.objects.filter(otpkey=search_term5).exists()
print(search_term6)
if search_term6:
return super().form_valid(form)
else:
form.add_error('OTP', 'Wrong OTP')
return self.form_invalid(form)
def show_info(request):
id = request.session['NID']
registration = Registration.objects.get(nid=id)
center_id = Registration.objects.get(nid=id).center
nid = Nid.objects.get(id=id)
address_id = Nid.objects.get(id=id).address
center = Center.objects.get(center_id = center_id.center_id)
address = Address.objects.get(id=address_id.id)
center_address= Center.objects.get(center_id = center_id.center_id).center_address
return render(request, 'ShowInfo.html', {'nid': nid, 'registration':registration,'address':address,'center':center,'center_id':center_address})
def renderpdfview(request):
id = request.session['NID']
del request.session['NID']
registration = Registration.objects.get(nid=id)
center_id = Registration.objects.get(nid=id).center
nid = Nid.objects.get(id=id)
address_id = Nid.objects.get(id=id).address
center = Center.objects.get(center_id = center_id.center_id)
address = Address.objects.get(id=address_id.id)
center_address= Center.objects.get(center_id = center_id.center_id).center_address
template_path = 'renderpdf.html' # template_path = 'user_printer.html'
# Create a Django response object, and specify content_type as pdf
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="VaccineCard.pdf"' # Attachment enables it to be downloadable
# find the template and render it.
template = get_template(template_path)
html = template.render({'nid': nid, 'registration':registration,'address':address,'center':center,'center_id':center_address})
# create a pdf
pisa_status = pisa.CreatePDF(
html, dest=response) # dest=response; destination is response
# if there was an error, return the rendered HTML for debugging
if pisa_status.err:
return HttpResponse('We had some errors <pre>' + html + '</pre>')
return response
|
[] |
[] |
[
"account_sid",
"auth_token"
] |
[]
|
["account_sid", "auth_token"]
|
python
| 2 | 0 | |
v2/cacheutils/cache.go
|
package cacheutils
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"google.golang.org/grpc"
"google.golang.org/protobuf/encoding/protojson"
"github.com/golang/glog"
"github.com/kubeflow/pipelines/api/v2alpha1/go/cachekey"
"github.com/kubeflow/pipelines/api/v2alpha1/go/pipelinespec"
api "github.com/kubeflow/pipelines/v2/kfp-api"
"github.com/kubeflow/pipelines/v2/third_party/ml_metadata"
)
const (
// MaxClientGRPCMessageSize contains the max grpc message size supported by the client
MaxClientGRPCMessageSize = 100 * 1024 * 1024
// The endpoint uses the Kubernetes service DNS name with namespace:
// https://kubernetes.io/docs/concepts/services-networking/service/#dns
defaultKfpApiEndpoint = "ml-pipeline.kubeflow:8887"
)
func GenerateFingerPrint(cacheKey *cachekey.CacheKey) (string, error) {
cacheKeyJsonBytes, err := protojson.Marshal(cacheKey)
if err != nil {
return "", fmt.Errorf("failed to marshal cache key with protojson: %w", err)
}
// Unmarshal and re-marshal through encoding/json so the bytes returned by protojson get a stable formatting.
// This is needed because protojson output is deliberately unstable: https://developers.google.com/protocol-buffers/docs/reference/go/faq#unstable-json
var v interface{}
if err := json.Unmarshal(cacheKeyJsonBytes, &v); err != nil {
return "", fmt.Errorf("failed to unmarshall cache key json bytes array: %w", err)
}
formattedCacheKeyBytes, err := json.Marshal(v)
if err != nil {
return "", fmt.Errorf("failed to marshall cache key with golang encoding/json : %w", err)
}
hash := sha256.New()
hash.Write(formattedCacheKeyBytes)
md := hash.Sum(nil)
executionHashKey := hex.EncodeToString(md)
return executionHashKey, nil
}
func GenerateCacheKey(
inputs *pipelinespec.ExecutorInput_Inputs,
outputs *pipelinespec.ExecutorInput_Outputs,
outputParametersTypeMap map[string]string,
cmdArgs []string, image string) (*cachekey.CacheKey, error) {
cacheKey := cachekey.CacheKey{
InputArtifactNames: make(map[string]*cachekey.ArtifactNameList),
InputParameters: make(map[string]*pipelinespec.Value),
OutputArtifactsSpec: make(map[string]*pipelinespec.RuntimeArtifact),
OutputParametersSpec: make(map[string]string),
}
for inputArtifactName, inputArtifactList := range inputs.GetArtifacts() {
inputArtifactNameList := cachekey.ArtifactNameList{ArtifactNames: make([]string, 0)}
for _, artifact := range inputArtifactList.Artifacts {
inputArtifactNameList.ArtifactNames = append(inputArtifactNameList.ArtifactNames, artifact.GetName())
}
cacheKey.InputArtifactNames[inputArtifactName] = &inputArtifactNameList
}
for inputParameterName, inputParameterValue := range inputs.GetParameters() {
cacheKey.InputParameters[inputParameterName] = &pipelinespec.Value{
Value: inputParameterValue.Value,
}
}
for outputArtifactName, outputArtifactList := range outputs.GetArtifacts() {
if len(outputArtifactList.Artifacts) == 0 {
continue
}
// TODO: Support multiple artifacts someday, probably through the v2 engine.
outputArtifact := outputArtifactList.Artifacts[0]
outputArtifactWithUriWiped := pipelinespec.RuntimeArtifact{
Name: outputArtifact.GetName(),
Type: outputArtifact.GetType(),
Metadata: outputArtifact.GetMetadata(),
}
cacheKey.OutputArtifactsSpec[outputArtifactName] = &outputArtifactWithUriWiped
}
for outputParameterName := range outputs.GetParameters() {
outputParameterType, ok := outputParametersTypeMap[outputParameterName]
if !ok {
return nil, fmt.Errorf("unknown parameter %q found in ExecutorInput_Outputs", outputParameterName)
}
cacheKey.OutputParametersSpec[outputParameterName] = outputParameterType
}
cacheKey.ContainerSpec = &cachekey.ContainerSpec{
Image: image,
CmdArgs: cmdArgs,
}
return &cacheKey, nil
}
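// Illustrative sketch (hypothetical executorInput and values; not part of
// this package's exported flow): build a cache key for a step and derive
// its fingerprint.
//
//	cacheKey, err := GenerateCacheKey(
//		executorInput.GetInputs(),
//		executorInput.GetOutputs(),
//		map[string]string{"msg": "STRING"},
//		[]string{"sh", "-c", "echo hello"},
//		"python:3.9",
//	)
//	if err == nil {
//		fingerPrint, _ := GenerateFingerPrint(cacheKey)
//		_ = fingerPrint // hex-encoded sha256 over the canonicalized key JSON
//	}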
// Client is a KFP service client.
type Client struct {
svc api.TaskServiceClient
}
// NewClient creates a Client.
func NewClient() (*Client, error) {
cacheEndPoint := cacheDefaultEndpoint()
glog.Infof("Connecting to cache endpoint %s", cacheEndPoint)
conn, err := grpc.Dial(cacheEndPoint, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxClientGRPCMessageSize)), grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("metadata.NewClient() failed: %w", err)
}
return &Client{
svc: api.NewTaskServiceClient(conn),
}, nil
}
func cacheDefaultEndpoint() string {
// Discover ml-pipeline in the same namespace by env var.
// https://kubernetes.io/docs/concepts/services-networking/service/#environment-variables
cacheHost := os.Getenv("ML_PIPELINE_SERVICE_HOST")
cachePort := os.Getenv("ML_PIPELINE_SERVICE_PORT_GRPC")
if cacheHost != "" && cachePort != "" {
// If there is a ml-pipeline Kubernetes service in the same namespace,
// ML_PIPELINE_SERVICE_HOST and ML_PIPELINE_SERVICE_PORT env vars should
// exist by default, so we use it as default.
return cacheHost + ":" + cachePort
}
// If the env vars do not exist, use default ml-pipeline grpc endpoint `ml-pipeline.kubeflow:8887`.
glog.Infof("Cannot detect ml-pipeline in the same namespace, default to %s as KFP endpoint.", defaultKfpApiEndpoint)
return defaultKfpApiEndpoint
}
func (c *Client) GetExecutionCache(fingerPrint, pipelineName string) (string, error) {
fingerPrintPredicate := &api.Predicate{
Op: api.Predicate_EQUALS,
Key: "fingerprint",
Value: &api.Predicate_StringValue{StringValue: fingerPrint},
}
pipelineNamePredicate := &api.Predicate{
Op: api.Predicate_EQUALS,
Key: "pipelineName",
Value: &api.Predicate_StringValue{StringValue: pipelineName},
}
filter := api.Filter{Predicates: []*api.Predicate{fingerPrintPredicate, pipelineNamePredicate}}
taskFilterJson, err := protojson.Marshal(&filter)
if err != nil {
return "", fmt.Errorf("failed to convert filter into JSON: %w", err)
}
listTasksRequest := &api.ListTasksRequest{Filter: string(taskFilterJson), SortBy: "created_at desc", PageSize: 1}
listTasksResponse, err := c.svc.ListTasks(context.Background(), listTasksRequest)
if err != nil {
return "", fmt.Errorf("failed to list tasks: %w", err)
}
tasks := listTasksResponse.Tasks
if len(tasks) == 0 {
return "", nil
} else {
return tasks[0].GetMlmdExecutionID(), nil
}
}
func (c *Client) CreateExecutionCache(ctx context.Context, task *api.Task) error {
req := &api.CreateTaskRequest{
Task: task,
}
_, err := c.svc.CreateTask(ctx, req)
if err != nil {
return fmt.Errorf("failed to create task: %w", err)
}
return nil
}
func GetMLMDOutputParams(cachedExecution *ml_metadata.Execution) (map[string]string, error) {
mlmdOutputParameters := make(map[string]string)
for customPropName, customPropValue := range cachedExecution.CustomProperties {
if strings.HasPrefix(customPropName, "output:") {
slice := strings.Split(customPropName, ":")
if len(slice) != 2 {
return nil, fmt.Errorf("failed to parse output parameter from MLMD execution custom property %v", customPropName)
}
outputParamName := slice[1]
var outputParamValue string
switch t := customPropValue.Value.(type) {
case *ml_metadata.Value_StringValue:
outputParamValue = customPropValue.GetStringValue()
case *ml_metadata.Value_DoubleValue:
outputParamValue = strconv.FormatFloat(customPropValue.GetDoubleValue(), 'f', -1, 64)
case *ml_metadata.Value_IntValue:
outputParamValue = strconv.FormatInt(customPropValue.GetIntValue(), 10)
default:
return nil, fmt.Errorf("unknown PipelineSpec Value type %T", t)
}
mlmdOutputParameters[outputParamName] = outputParamValue
}
}
return mlmdOutputParameters, nil
}
|
[
"\"ML_PIPELINE_SERVICE_HOST\"",
"\"ML_PIPELINE_SERVICE_PORT_GRPC\""
] |
[] |
[
"ML_PIPELINE_SERVICE_HOST",
"ML_PIPELINE_SERVICE_PORT_GRPC"
] |
[]
|
["ML_PIPELINE_SERVICE_HOST", "ML_PIPELINE_SERVICE_PORT_GRPC"]
|
go
| 2 | 0 | |
examples/09-create-customer-payment.py
|
# coding=utf-8
#
# Example: Creating a payment for a customer
#
from __future__ import print_function
from __future__ import absolute_import
import os
import time
import flask
from app import database_write
from mollie.api.client import Client
from mollie.api.error import Error
def main():
try:
#
# Initialize the Mollie API library with your API key.
#
# See: https://www.mollie.com/dashboard/settings/profiles
#
api_key = os.environ.get("MOLLIE_API_KEY", "test_test")
mollie_client = Client()
mollie_client.set_api_key(api_key)
body = ""
customer_id = flask.request.args.get("customer_id")
# If no customer ID was provided in the URL, we grab the first customer
if customer_id is None:
customers = mollie_client.customers.list()
body += (
"<p>No customer ID specified. Attempting to retrieve the first page of "
)
body += "customers and grabbing the first.</p>"
if not len(customers):
body += "<p>You have no customers. You can create one from the examples.</p>"
return body
customer = next(customers)
else:
customer = mollie_client.customers.get(customer_id)
#
# Generate a unique webshop order number for this example. It is important to include this unique attribute
# in the redirectUrl (below) so a proper return page can be shown to the customer.
#
my_webshop_id = int(time.time())
#
# See: https://www.mollie.com/nl/docs/reference/customers/create-payment
#
payment = mollie_client.customer_payments.with_parent_id(customer.id).create(
{
"amount": {"currency": "EUR", "value": "100.00"},
"description": "My first API payment",
"webhookUrl": "{root}02-webhook_verification".format(
root=flask.request.url_root
),
"redirectUrl": "{root}03-return-page?my_webshop_id={id}".format(
root=flask.request.url_root, id=my_webshop_id
),
"metadata": {"my_webshop_id": str(my_webshop_id)},
}
)
data = {"status": payment.status}
database_write(my_webshop_id, data)
return "<p>Created payment of {curr} {value} for {cust} ({id})<p>".format(
curr=payment.amount["currency"],
value=payment.amount["value"],
cust=customer.name,
id=customer.id,
)
except Error as err:
return "API call failed: {error}".format(error=err)
if __name__ == "__main__":
print(main())
|
[] |
[] |
[
"MOLLIE_API_KEY"
] |
[]
|
["MOLLIE_API_KEY"]
|
python
| 1 | 0 | |
scripts/train_model.py
|
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from azureml.core.run import Run
import os
import argparse
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import joblib
import json
def train_model(run, data, alpha):
run.log("alpha", alpha)
run.parent.log("alpha", alpha)
reg = Ridge(alpha=alpha)
reg.fit(data["train"]["X"], data["train"]["y"])
preds = reg.predict(data["test"]["X"])
run.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
run.parent.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
return reg
def main():
print("Running train.py")
parser = argparse.ArgumentParser("train")
parser.add_argument(
"--build_id",
type=str,
help="The build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="sklearn_regression_model.pkl",
)
parser.add_argument(
"--step_output",
type=str,
help=("output for passing data to next step")
)
args = parser.parse_args()
print("Argument [build_id]: %s" % args.build_id)
print("Argument [model_name]: %s" % args.model_name)
print("Argument [step_output]: %s" % args.step_output)
model_name = args.model_name
build_id = args.build_id
step_output_path = args.step_output
print("Getting training parameters")
alpha = 0.5
print("Parameter alpha: %s" % alpha)
run = Run.get_context()
# Get the dataset
dataset = run.input_datasets['training_data']
if (dataset):
df = dataset.to_pandas_dataframe()
X = df.values
y = df.Y
else:
e = ("No dataset provided")
print(e)
raise Exception(e)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
data = {"train": {"X": X_train, "y": y_train},
"test": {"X": X_test, "y": y_test}}
reg = train_model(run, data, alpha)
# Pass model file to next step
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=reg, filename=model_output_path)
# Also upload model file to run outputs for history
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=reg, filename=output_path)
# Add properties to identify this specific training run
run.parent.tag("BuildId", value=build_id)
run.tag("BuildId", value=build_id)
run.tag("run_type", value="train")
builduri_base = os.environ.get("BUILDURI_BASE")
if (builduri_base is not None):
build_uri = builduri_base + build_id
run.tag("BuildUri", value=build_uri)
run.parent.tag("BuildUri", value=build_uri)
print(f"tags now present for run: {run.tags}")
run.complete()
if __name__ == '__main__':
main()
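# Illustrative invocation (hypothetical values; in practice this script runs
# as an Azure ML pipeline step, where Run.get_context() and the
# 'training_data' input dataset resolve automatically):
#
#   python train_model.py --build_id 20210101.1 \
#       --model_name sklearn_regression_model.pkl \
#       --step_output outputs/step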
|
[] |
[] |
[
"BUILDURI_BASE"
] |
[]
|
["BUILDURI_BASE"]
|
python
| 1 | 0 | |
test/e2e/testutils/tester/tester.go
|
package tester
import (
"context"
"os"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"github.com/liqotech/liqo/pkg/utils"
)
// Tester is used to encapsulate the context where the test is executed.
type Tester struct {
Clusters []ClusterContext
Namespace string
}
// ClusterContext encapsulate all information and objects used to access a test cluster.
type ClusterContext struct {
Config *rest.Config
Client *kubernetes.Clientset
ClusterID string
KubeconfigPath string
}
var (
tester *Tester
)
// GetTester returns a Tester instance.
func GetTester(ctx context.Context) *Tester {
if tester == nil {
tester = createTester(ctx)
}
return tester
}
func createTester(ctx context.Context) *Tester {
kubeconfig1 := os.Getenv("KUBECONFIG_1")
if kubeconfig1 == "" {
klog.Error("KUBECONFIG_1 not set")
os.Exit(1)
}
kubeconfig2 := os.Getenv("KUBECONFIG_2")
if kubeconfig2 == "" {
klog.Error("KUBECONFIG_2 not set")
os.Exit(1)
}
namespace := os.Getenv("NAMESPACE")
if namespace == "" {
klog.Error("NAMESPACE not set")
os.Exit(1)
}
config1, err := clientcmd.BuildConfigFromFlags("", kubeconfig1)
if err != nil {
klog.Error(err)
os.Exit(1)
}
config2, err := clientcmd.BuildConfigFromFlags("", kubeconfig2)
if err != nil {
klog.Error(err)
os.Exit(1)
}
clientset1, err := kubernetes.NewForConfig(config1)
if err != nil {
klog.Error(err)
os.Exit(1)
}
clientset2, err := kubernetes.NewForConfig(config2)
if err != nil {
klog.Error(err)
os.Exit(1)
}
clusterID1, err := utils.GetClusterIDWithNativeClient(ctx, clientset1, namespace)
if err != nil {
klog.Warningf("an error occurred while getting cluster-id configmap %s", err)
clusterID1 = ""
}
clusterID2, err := utils.GetClusterIDWithNativeClient(ctx, clientset2, namespace)
if err != nil {
klog.Warningf("an error occurred while getting cluster-id configmap %s", err)
clusterID2 = ""
}
return &Tester{
Namespace: namespace,
Clusters: []ClusterContext{
{
Config: config1,
KubeconfigPath: kubeconfig1,
Client: clientset1,
ClusterID: clusterID1,
},
{
Config: config2,
KubeconfigPath: kubeconfig2,
Client: clientset2,
ClusterID: clusterID2,
},
},
}
}
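// Illustrative environment for a local run (hypothetical paths and
// namespace):
//
//	KUBECONFIG_1=$HOME/kubeconf_1 KUBECONFIG_2=$HOME/kubeconf_2 \
//	NAMESPACE=liqo go test ./test/e2e/...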
|
[
"\"KUBECONFIG_1\"",
"\"KUBECONFIG_2\"",
"\"NAMESPACE\""
] |
[] |
[
"KUBECONFIG_1",
"NAMESPACE",
"KUBECONFIG_2"
] |
[]
|
["KUBECONFIG_1", "NAMESPACE", "KUBECONFIG_2"]
|
go
| 3 | 0 | |
help.go
|
package cli
import (
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"
"unicode/utf8"
)
var helpCommand = Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
ShowAppHelp(c)
return nil
},
}
var helpSubcommand = Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
return ShowSubcommandHelp(c)
},
}
// Prints help for the App or Command
type helpPrinter func(w io.Writer, templ string, data interface{})
// Prints help for the App or Command with custom template function.
type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
// HelpPrinter is a function that writes the help output. If not set a default
// is used. The function signature is:
// func(w io.Writer, templ string, data interface{})
var HelpPrinter helpPrinter = printHelp
// HelpPrinterCustom is same as HelpPrinter but
// takes a custom function for template function map.
var HelpPrinterCustom helpPrinterCustom = printHelpCustom
// VersionPrinter prints the version for the App
var VersionPrinter = printVersion
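// Illustrative override from a consuming package (a hypothetical sketch, not
// part of this file): swapping the printer customizes all help output.
//
//	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
//		fmt.Fprintln(w, "see the project docs for help")
//	}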
// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
func ShowAppHelpAndExit(c *Context, exitCode int) {
ShowAppHelp(c)
os.Exit(exitCode)
}
// ShowAppHelp is an action that displays the help.
func ShowAppHelp(c *Context) (err error) {
if c.App.CustomAppHelpTemplate == "" {
HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
return
}
customAppData := func() map[string]interface{} {
if c.App.ExtraInfo == nil {
return nil
}
return map[string]interface{}{
"ExtraInfo": c.App.ExtraInfo,
}
}
HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
return nil
}
// DefaultAppComplete prints the list of subcommands as the default app completion method
func DefaultAppComplete(c *Context) {
DefaultCompleteWithFlags(nil)(c)
}
func printCommandSuggestions(commands []Command, writer io.Writer) {
for _, command := range commands {
if command.Hidden {
continue
}
if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" {
for _, name := range command.Names() {
fmt.Fprintf(writer, "%s:%s\n", name, command.Usage)
}
} else {
for _, name := range command.Names() {
fmt.Fprintf(writer, "%s\n", name)
}
}
}
}
func cliArgContains(flagName string) bool {
for _, name := range strings.Split(flagName, ",") {
name = strings.TrimSpace(name)
count := utf8.RuneCountInString(name)
if count > 2 {
count = 2
}
flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
for _, a := range os.Args {
if a == flag {
return true
}
}
}
return false
}
func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) {
cur := strings.TrimPrefix(lastArg, "-")
cur = strings.TrimPrefix(cur, "-")
for _, flag := range flags {
if bflag, ok := flag.(BoolFlag); ok && bflag.Hidden {
continue
}
for _, name := range strings.Split(flag.GetName(), ",") {
name = strings.TrimSpace(name)
// this will get the total count of utf8 letters in the flag name
count := utf8.RuneCountInString(name)
if count > 2 {
count = 2 // reuse this count to generate a single - or -- in flag completion
}
// if the flag name has only one utf8 letter and the last cli argument has a -- prefix,
// then skip completion for short flags such as -v or -x
if strings.HasPrefix(lastArg, "--") && count == 1 {
continue
}
// match if last argument matches this flag and it is not repeated
if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(flag.GetName()) {
flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
fmt.Fprintln(writer, flagCompletion)
}
}
}
}
func DefaultCompleteWithFlags(cmd *Command) func(c *Context) {
return func(c *Context) {
if len(os.Args) > 2 {
lastArg := os.Args[len(os.Args)-2]
if strings.HasPrefix(lastArg, "-") {
printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer)
if cmd != nil {
printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer)
}
return
}
}
if cmd != nil {
printCommandSuggestions(cmd.Subcommands, c.App.Writer)
} else {
printCommandSuggestions(c.App.Commands, c.App.Writer)
}
}
}
// ShowCommandHelpAndExit - exits with code after showing help
func ShowCommandHelpAndExit(c *Context, command string, code int) {
ShowCommandHelp(c, command)
os.Exit(code)
}
// ShowCommandHelp prints help for the given command
func ShowCommandHelp(ctx *Context, command string) error {
// show the subcommand help for a command with subcommands
if command == "" {
HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
return nil
}
for _, c := range ctx.App.Commands {
if c.HasName(command) {
if c.CustomHelpTemplate != "" {
HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil)
} else {
HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
}
return nil
}
}
if ctx.App.CommandNotFound == nil {
return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
}
ctx.App.CommandNotFound(ctx, command)
return nil
}
// ShowSubcommandHelp prints help for the given subcommand
func ShowSubcommandHelp(c *Context) error {
return ShowCommandHelp(c, c.Command.Name)
}
// ShowVersion prints the version number of the App
func ShowVersion(c *Context) {
VersionPrinter(c)
}
func printVersion(c *Context) {
fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
}
// ShowCompletions prints the lists of commands within a given context
func ShowCompletions(c *Context) {
a := c.App
if a != nil && a.BashComplete != nil {
a.BashComplete(c)
}
}
// ShowCommandCompletions prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
c := ctx.App.Command(command)
if c != nil {
if c.BashComplete != nil {
c.BashComplete(ctx)
} else {
DefaultCompleteWithFlags(c)(ctx)
}
}
}
func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) {
funcMap := template.FuncMap{
"join": strings.Join,
}
for key, value := range customFunc {
funcMap[key] = value
}
w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
err := t.Execute(w, data)
if err != nil {
// If the writer is closed, t.Execute will fail, and there's nothing
// we can do to recover.
if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
}
return
}
w.Flush()
}
func printHelp(out io.Writer, templ string, data interface{}) {
printHelpCustom(out, templ, data, nil)
}
func checkVersion(c *Context) bool {
found := false
if VersionFlag.GetName() != "" {
eachName(VersionFlag.GetName(), func(name string) {
if c.GlobalBool(name) || c.Bool(name) {
found = true
}
})
}
return found
}
func checkHelp(c *Context) bool {
found := false
if HelpFlag.GetName() != "" {
eachName(HelpFlag.GetName(), func(name string) {
if c.GlobalBool(name) || c.Bool(name) {
found = true
}
})
}
return found
}
func checkCommandHelp(c *Context, name string) bool {
if c.Bool("h") || c.Bool("help") {
ShowCommandHelp(c, name)
return true
}
return false
}
func checkSubcommandHelp(c *Context) bool {
if c.Bool("h") || c.Bool("help") {
ShowSubcommandHelp(c)
return true
}
return false
}
func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
if !a.EnableBashCompletion {
return false, arguments
}
pos := len(arguments) - 1
lastArg := arguments[pos]
if lastArg != "--"+BashCompletionFlag.GetName() {
return false, arguments
}
return true, arguments[:pos]
}
func checkCompletions(c *Context) bool {
if !c.shellComplete {
return false
}
if args := c.Args(); args.Present() {
name := args.First()
if cmd := c.App.Command(name); cmd != nil {
// let the command handle the completion
return false
}
}
ShowCompletions(c)
return true
}
func checkCommandCompletions(c *Context, name string) bool {
if !c.shellComplete {
return false
}
ShowCommandCompletions(c, name)
return true
}
| ["\"_CLI_ZSH_AUTOCOMPLETE_HACK\"", "\"CLI_TEMPLATE_ERROR_DEBUG\""] | [] | ["CLI_TEMPLATE_ERROR_DEBUG", "_CLI_ZSH_AUTOCOMPLETE_HACK"] | [] | ["CLI_TEMPLATE_ERROR_DEBUG", "_CLI_ZSH_AUTOCOMPLETE_HACK"] | go | 2 | 0 | |
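The completion helpers above only run when bash completion is enabled on the app. A minimal wiring sketch for urfave/cli v1 follows; the app name and flag are illustrative assumptions, and passing nil to DefaultCompleteWithFlags restricts suggestions to app-level commands and flags.

package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "demo" // hypothetical app name
	// The hidden --generate-bash-completion flag only works when this is
	// set; it is what routes execution into the helpers above.
	app.EnableBashCompletion = true
	// Suggest app-level commands and flags; passing a *cli.Command instead
	// would add that command's flags to the suggestions.
	app.BashComplete = cli.DefaultCompleteWithFlags(nil)
	app.Flags = []cli.Flag{cli.BoolFlag{Name: "verbose, v"}}
	_ = app.Run(os.Args)
}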
cmd/cli/signyamlverify_test.go
|
// The code in this file was adapted from the following source to test signing YAML files.
// Original source: https://github.com/sigstore/cosign/blob/main/test/e2e_test.go
package cli
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/pkg/errors"
"github.com/sigstore/cosign/pkg/cosign"
)
var (
ishiedlRoot = os.Getenv("ISHIELD_REPO_ROOT")
testPayloadPath = ishiedlRoot + "/cmd/test/data/test_configmap.yaml"
testSignedPayloadPath = ishiedlRoot + "/cmd/test/data/test_configmap.yaml.signed"
)
var passFunc = func(_ bool) ([]byte, error) {
return []byte("sign yaml test"), nil
}
var signYaml = func(keyRef, payloadPath string) error {
cmd := SignYamlCommand{
KeyRef: keyRef,
PayloadPath: payloadPath,
Sk: false,
Pf: passFunc,
}
return cmd.Exec(context.Background(), nil)
}
var signYamlVerify = func(keyRef, payloadPath string) error {
cmd := VerifyYamlCommand{
CheckClaims: true,
KeyRef: keyRef,
Sk: false,
Output: "json",
PayloadPath: payloadPath,
}
return cmd.Exec(context.Background(), nil)
}
func TestSignYamlVerify(t *testing.T) {
tmpDir := t.TempDir()
// generate key pairs
privKeyPath, pubKeyPath := generateKeypair(t, tmpDir)
// Verification must fail before the yaml is signed
mustFail(signYamlVerify(pubKeyPath, testPayloadPath), t)
// Now sign the yaml file; this must pass
mustPass(signYaml(privKeyPath, testPayloadPath), t)
// Verify yaml, this must pass
mustPass(signYamlVerify(pubKeyPath, testSignedPayloadPath), t)
cleanUp(testSignedPayloadPath)
}
func generateKeypair(t *testing.T, tmpDir string) (string, string) {
keys, err := cosign.GenerateKeyPair(passFunc)
if err != nil {
t.Fatal(err)
}
privKeyPath := filepath.Join(tmpDir, "cosign.key")
err = ioutil.WriteFile(privKeyPath, keys.PrivateBytes, 0600)
if err != nil {
t.Fatal(err)
}
pubKeyPath := filepath.Join(tmpDir, "cosign.pub")
if err = ioutil.WriteFile(pubKeyPath, keys.PublicBytes, 0600); err != nil {
t.Fatal(err)
}
return privKeyPath, pubKeyPath
}
func mustFail(err error, t *testing.T) {
t.Helper()
if err == nil {
t.Fatal("expected an error, got nil")
}
}
func mustPass(err error, t *testing.T) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func cleanUp(path string) error {
err := os.Remove(path)
if err != nil {
return errors.Wrap(err, "failed to remove signed file")
}
return nil
}
| ["\"ISHIELD_REPO_ROOT\""] | [] | ["ISHIELD_REPO_ROOT"] | [] | ["ISHIELD_REPO_ROOT"] | go | 1 | 0 | |
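The test above resolves ISHIELD_REPO_ROOT at package init, so it must be exported before go test starts. A hedged alternative sketch resolves it inside the test with t.Setenv (Go 1.17+); the fallback path is an illustrative assumption, not part of the original suite.

package cli

import (
	"os"
	"testing"
)

// repoRoot returns ISHIELD_REPO_ROOT, registering a test-scoped default
// when the variable is unset so the test stays self-contained.
func repoRoot(t *testing.T) string {
	t.Helper()
	if root := os.Getenv("ISHIELD_REPO_ROOT"); root != "" {
		return root
	}
	root := "../.." // hypothetical: repo root relative to this package
	t.Setenv("ISHIELD_REPO_ROOT", root)
	return root
}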
functions/source/StartGlueJobFunction/glue_trigger_lambda.py
|
import logging
import os
import time

import boto3

logger = logging.getLogger(__name__)
glue_client = boto3.client("glue")

JOB_NAME = os.environ.get("JOB_NAME")
CRAWLER_NAME = os.environ.get("CRAWLER_NAME")
CRAWL_TIMEOUT = 800
CRAWL_WAIT_INTERVAL = 30


def handler(event, context):
    logger.debug("Starting Crawler")
    glue_client.start_crawler(Name=CRAWLER_NAME)
    elapsed_time = 0
    crawler_status = None
    crawler_succeeded = False
    # poll until the crawler returns to the "ready" state or we time out
    while crawler_status != "ready" and elapsed_time < CRAWL_TIMEOUT:
        time.sleep(CRAWL_WAIT_INTERVAL)
        response = glue_client.get_crawler(Name=CRAWLER_NAME)
        crawler = response["Crawler"]  # get_crawler nests the fields under "Crawler"
        crawler_status = crawler["State"].lower()
        elapsed_time += CRAWL_WAIT_INTERVAL
        if (
            crawler_status == "ready"
            and crawler.get("LastCrawl", {}).get("Status", "").lower() == "succeeded"
        ):
            crawler_succeeded = True
    if crawler_succeeded:
        glue_client.start_job_run(JobName=JOB_NAME)
    else:
        logger.debug("Crawler did not succeed")
| [] | [] | ["JOB_NAME", "CRAWLER_NAME"] | [] | ["JOB_NAME", "CRAWLER_NAME"] | python | 2 | 0 | |
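The crawler wait loop above is a plain poll-until-ready pattern: sleep, re-check state, stop on success or timeout. A minimal sketch of the same pattern in Go (for consistency with the rest of this set); the package and function names are illustrative, not part of the Lambda.

package poll

import (
	"errors"
	"time"
)

// waitUntil polls probe every interval until it reports done or the
// timeout elapses, mirroring the crawler loop in the Lambda above.
func waitUntil(probe func() (bool, error), interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		time.Sleep(interval)
		done, err := probe()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
	return errors.New("timed out waiting for condition")
}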
cmd/tendermint/commands/root.go
|
package commands
import (
"errors"
"os"
"os/user"
"path/filepath"
"github.com/bcbchain/tendermint/state"
"github.com/bcbchain/tendermint/sidechain"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/bcbchain/bclib/tendermint/tmlibs/cli"
tmflags "github.com/bcbchain/bclib/tendermint/tmlibs/cli/flags"
"github.com/bcbchain/bclib/tendermint/tmlibs/log"
cfg "github.com/bcbchain/tendermint/config"
)
var (
config = cfg.DefaultConfig()
logger = (log.Logger)(nil)
output = (*os.File)(nil)
)
func init() {
registerFlagsRootCmd(RootCmd)
sidechain.ConfigPathFunc = GetConfigFiles
state.SetConfigFunc = SetConfig
}
func registerFlagsRootCmd(cmd *cobra.Command) {
cmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
// For log customization, to support Log file
cmd.PersistentFlags().String("log_file", config.LogFile, "Log file")
}
// ParseConfig retrieves the default environment configuration,
// sets up the Tendermint root and ensures that the root exists
func ParseConfig(isInit bool) (*cfg.Config, error) {
conf := cfg.DefaultConfig()
confStat, err0 := os.Stat(conf.ConfigFilePath())
genStat, err1 := os.Stat(conf.GenesisFile())
if err0 == nil && confStat.Mode().IsRegular() && err1 == nil && genStat.Mode().IsRegular() {
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
return conf, nil
}
tmHome := os.Getenv("TMHOME")
conf.SetRoot(tmHome)
confStat, err0 = os.Stat(conf.ConfigFilePath())
genStat, err1 = os.Stat(conf.GenesisFile())
if err0 == nil && confStat.Mode().IsRegular() && err1 == nil && genStat.Mode().IsRegular() {
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
return conf, nil
}
pwd, err := os.Getwd()
if err == nil {
conf.SetRoot(pwd)
confStat, err0 = os.Stat(conf.ConfigFilePath())
genStat, err1 = os.Stat(conf.GenesisFile())
if err0 == nil && confStat.Mode().IsRegular() && err1 == nil && genStat.Mode().IsRegular() {
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
return conf, nil
}
}
usr, err := user.Current()
if err == nil {
conf.SetRoot(filepath.Join(usr.HomeDir, ".tendermint"))
confStat, err0 = os.Stat(conf.ConfigFilePath())
genStat, err1 = os.Stat(conf.GenesisFile())
if err0 == nil && confStat.Mode().IsRegular() && err1 == nil && genStat.Mode().IsRegular() {
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
return conf, nil
}
}
if !isInit {
return nil, errors.New("you must init genesis")
}
if tmHome != "" {
conf.SetRoot(tmHome)
} else if usr != nil {
conf.SetRoot(filepath.Join(usr.HomeDir, ".tendermint"))
} else {
return nil, errors.New("cannot determine a home directory for the config root")
}
cfg.EnsureRoot(conf.RootDir)
return conf, nil
}
// RootCmd is the root command for Tendermint core.
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
if cmd.Name() == VersionCmd.Name() {
return nil
}
config, err = ParseConfig(cmd == InitFilesCmd)
if err != nil {
return err
}
if len(config.LogFile) > 0 {
output, err = os.OpenFile(config.LogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return err
}
} else {
output = os.Stdout
}
logger1 := log.NewTMLogger(config.LogDir(), "tmcore")
logger1.SetOutputAsync(true)
logger1.SetWithThreadID(true)
logger1.AllowLevel("debug")
logger = logger1
logger, err = tmflags.ParseLogLevel(config.LogFile, config.LogLevel, logger, cfg.DefaultLogLevel())
if err != nil {
return err
}
if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
logger = logger.With("module", "main")
return nil
},
}
func GetConfig() *cfg.Config {
return config
}
func GetConfigFiles() (string, string, string, string, string) {
return config.GenesisFile(), config.ConfigFilePath(), config.DBDir(), config.ValidatorsFile(), config.PrivValidatorFile()
}
func SetConfig(CreateEmptyBlocks bool, ForceIntervalBlockSwitch bool, CreateEmptyBlocksInterval int) (*cfg.Config, string) {
cfgConsensus := config.Consensus
cfgMempool := config.Mempool
cfgConsensus.CreateEmptyBlocks = CreateEmptyBlocks
cfgMempool.ForceIntervalBlockSwitch = ForceIntervalBlockSwitch
cfgConsensus.CreateEmptyBlocksInterval = CreateEmptyBlocksInterval
_, configFilePath, _, _, _ := GetConfigFiles()
return GetConfig(), configFilePath
}
| ["\"TMHOME\""] | [] | ["TMHOME"] | [] | ["TMHOME"] | go | 1 | 0 | |
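ParseConfig above probes candidate roots in a fixed order: the compiled-in default, $TMHOME, the working directory, then ~/.tendermint. A condensed sketch of that resolution order; firstExistingRoot and isRegular are illustrative names, not functions from the original file.

package commands

import (
	"os"
	"os/user"
	"path/filepath"
)

func isRegular(path string) bool {
	st, err := os.Stat(path)
	return err == nil && st.Mode().IsRegular()
}

// firstExistingRoot returns the first candidate root that already holds
// both a config file and a genesis file, mirroring ParseConfig's probes.
func firstExistingRoot(confName, genName string) (string, bool) {
	var candidates []string
	if tmHome := os.Getenv("TMHOME"); tmHome != "" {
		candidates = append(candidates, tmHome)
	}
	if pwd, err := os.Getwd(); err == nil {
		candidates = append(candidates, pwd)
	}
	if usr, err := user.Current(); err == nil {
		candidates = append(candidates, filepath.Join(usr.HomeDir, ".tendermint"))
	}
	for _, root := range candidates {
		if isRegular(filepath.Join(root, confName)) && isRegular(filepath.Join(root, genName)) {
			return root, true
		}
	}
	return "", false
}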
main.go
|
package main
// Utility to automatically back up factorio save files.
// Intended to run as a sidecar alongside the factorio server.
// Much of this is adapted from the fsnotify and minio examples.
import (
"context"
"fmt"
"github.com/fsnotify/fsnotify"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"io/ioutil"
"log"
"os"
"sort"
)
func backup(mc *minio.Client, ctx context.Context, bucketName string, dir string) error {
files := sortedFileList(dir)
if len(files) == 0 {
return fmt.Errorf("no save files found in %s", dir)
}
latestSavePath := dir + "/" + files[0].Name()
objectName := fmt.Sprintf("_autosaveXX-%d.zip", files[0].ModTime().Unix())
fmt.Println(bucketName, objectName, latestSavePath)
// FPutObject in minio-go v7 returns an UploadInfo, not a byte count
info, err := mc.FPutObject(ctx, bucketName, objectName, latestSavePath, minio.PutObjectOptions{ContentType: "application/zip"})
if err != nil {
return err
}
log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
return nil
}
func sortedFileList(dir string) []os.FileInfo {
files, err := ioutil.ReadDir(dir)
if err != nil {
log.Fatal(err)
}
sort.Slice(files, func(a, b int) bool {
return files[a].ModTime().After(files[b].ModTime())
})
fmt.Println(files)
/*
// in case I ever doubt the above code
for _, file := range files {
fmt.Println(file.ModTime(), file.Name())
}
*/
return files
}
func main() {
fmt.Println("-=Factorio Save Backuper Sidecar=-")
ctx := context.Background()
// S3_ENDPOINT="localhost:9000"
// S3_ENDPOINT="minio.minio:9000"
endpoint := os.Getenv("S3_ENDPOINT")
// ACCESS_KEY_ID="xxx"
accessKeyID := os.Getenv("ACCESS_KEY_ID")
// SECRET_ACCESS_KEY="xxx"
secretAccessKey := os.Getenv("SECRET_ACCESS_KEY")
// SAVES_DIRECTORY="/factorio/saves"
savesDirectory := os.Getenv("SAVES_DIRECTORY")
// FSID="abcdabcd"
// 'factorio server id'
FSID := os.Getenv("FSID")
fmt.Println("Watching save files in:", savesDirectory)
// use this in the future
useSSL := false
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
if err != nil {
log.Fatalln(err)
}
log.Printf("%#v\n", minioClient) // minioClient is now set up
// Make a bucket for this server's saves.
bucketName := "factorio-saves-" + FSID
location := "us-east-1"
err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
if err != nil {
// Check to see if we already own this bucket
exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
if errBucketExists == nil && exists {
log.Printf("We already own %s\n", bucketName)
} else {
log.Fatalln(err)
}
} else {
log.Printf("Successfully created %s\n", bucketName)
}
//inotify yeet
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
done := make(chan bool)
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return
}
log.Println("event:", event)
if event.Op&fsnotify.Write == fsnotify.Write {
log.Println("modified file:", event.Name)
}
if event.Op&fsnotify.Remove == fsnotify.Remove {
// Fire backups on the "remove" event.
// "remove" is the final step in the factorio autosave sequence:
// autosave3.new.zip replaces autosave3.zip, then autosave3.bak.zip
// is removed. At that point the file is completely written and the
// newest file in the directory is the latest save.
log.Println("removed a file:", event.Name)
log.Println("Running backup code")
if err := backup(minioClient, ctx, bucketName, savesDirectory); err != nil {
log.Fatal(err)
}
}
case err, ok := <-watcher.Errors:
if !ok {
return
}
log.Println("error:", err)
}
}
}()
err = watcher.Add(savesDirectory)
if err != nil {
log.Fatal(err)
}
<-done
}
| ["\"S3_ENDPOINT\"", "\"ACCESS_KEY_ID\"", "\"SECRET_ACCESS_KEY\"", "\"SAVES_DIRECTORY\"", "\"FSID\""] | [] | ["SECRET_ACCESS_KEY", "FSID", "S3_ENDPOINT", "SAVES_DIRECTORY", "ACCESS_KEY_ID"] | [] | ["SECRET_ACCESS_KEY", "FSID", "S3_ENDPOINT", "SAVES_DIRECTORY", "ACCESS_KEY_ID"] | go | 5 | 0 | |
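Factorio's autosave emits a burst of filesystem events per save, which is why the watcher above keys the upload off the final Remove. An alternative is to debounce the event stream so one upload fires per burst; this sketch assumes the same fsnotify API used above and an arbitrary 2-second quiet period.

package watch

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounce coalesces bursts of watcher events and invokes fn once the
// stream has been quiet for the given duration.
func debounce(events <-chan fsnotify.Event, quiet time.Duration, fn func()) {
	var timer *time.Timer
	for range events {
		if timer != nil {
			timer.Stop()
		}
		timer = time.AfterFunc(quiet, fn)
	}
}

func watchSaves(w *fsnotify.Watcher) {
	go debounce(w.Events, 2*time.Second, func() {
		log.Println("save burst settled; run the backup here")
	})
}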
pkg/runtime/cli.go
|
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/dapr/kit/logger"
"github.com/dapr/dapr/pkg/acl"
resiliency_v1alpha "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1"
global_config "github.com/dapr/dapr/pkg/config"
env "github.com/dapr/dapr/pkg/config/env"
"github.com/dapr/dapr/pkg/cors"
"github.com/dapr/dapr/pkg/grpc"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/operator/client"
operator_v1 "github.com/dapr/dapr/pkg/proto/operator/v1"
resiliency_config "github.com/dapr/dapr/pkg/resiliency"
"github.com/dapr/dapr/pkg/runtime/security"
"github.com/dapr/dapr/pkg/version"
"github.com/dapr/dapr/utils"
)
// FromFlags parses command flags and returns DaprRuntime instance.
func FromFlags() (*DaprRuntime, error) {
mode := flag.String("mode", string(modes.StandaloneMode), "Runtime mode for Dapr")
daprHTTPPort := flag.String("dapr-http-port", fmt.Sprintf("%v", DefaultDaprHTTPPort), "HTTP port for Dapr API to listen on")
daprAPIListenAddresses := flag.String("dapr-listen-addresses", DefaultAPIListenAddress, "One or more addresses for the Dapr API to listen on, as a comma-delimited list")
daprPublicPort := flag.String("dapr-public-port", "", "Public port for Dapr Health and Metadata to listen on")
daprAPIGRPCPort := flag.String("dapr-grpc-port", fmt.Sprintf("%v", DefaultDaprAPIGRPCPort), "gRPC port for the Dapr API to listen on")
daprInternalGRPCPort := flag.String("dapr-internal-grpc-port", "", "gRPC port for the Dapr Internal API to listen on")
appPort := flag.String("app-port", "", "The port the application is listening on")
profilePort := flag.String("profile-port", fmt.Sprintf("%v", DefaultProfilePort), "The port for the profile server")
appProtocol := flag.String("app-protocol", string(HTTPProtocol), "Protocol for the application: grpc or http")
componentsPath := flag.String("components-path", "", "Path for components directory. If empty, components will not be loaded. Self-hosted mode only")
config := flag.String("config", "", "Path to config file, or name of a configuration object")
appID := flag.String("app-id", "", "A unique ID for Dapr. Used for Service Discovery and state")
controlPlaneAddress := flag.String("control-plane-address", "", "Address for a Dapr control plane")
sentryAddress := flag.String("sentry-address", "", "Address for the Sentry CA service")
placementServiceHostAddr := flag.String("placement-host-address", "", "Addresses for Dapr Actor Placement servers")
allowedOrigins := flag.String("allowed-origins", cors.DefaultAllowedOrigins, "Allowed HTTP origins")
enableProfiling := flag.Bool("enable-profiling", false, "Enable profiling")
runtimeVersion := flag.Bool("version", false, "Prints the runtime version")
buildInfo := flag.Bool("build-info", false, "Prints the build info")
waitCommand := flag.Bool("wait", false, "wait for Dapr outbound ready")
appMaxConcurrency := flag.Int("app-max-concurrency", -1, "Controls the concurrency level when forwarding requests to user code")
enableMTLS := flag.Bool("enable-mtls", false, "Enables automatic mTLS for daprd to daprd communication channels")
appSSL := flag.Bool("app-ssl", false, "Sets the URI scheme of the app to https and attempts an SSL connection")
daprHTTPMaxRequestSize := flag.Int("dapr-http-max-request-size", -1, "Increasing max size of request body in MB to handle uploading of big files. By default 4 MB.")
unixDomainSocket := flag.String("unix-domain-socket", "", "Path to a unix domain socket dir mount. If specified, Dapr API servers will use Unix Domain Sockets")
daprHTTPReadBufferSize := flag.Int("dapr-http-read-buffer-size", -1, "Increasing max size of read buffer in KB to handle sending multi-KB headers. By default 4 KB.")
daprHTTPStreamRequestBody := flag.Bool("dapr-http-stream-request-body", false, "Enables request body streaming on http server")
daprGracefulShutdownSeconds := flag.Int("dapr-graceful-shutdown-seconds", -1, "Graceful shutdown time in seconds.")
enableAPILogging := flag.Bool("enable-api-logging", false, "Enable API logging for API calls")
loggerOptions := logger.DefaultOptions()
loggerOptions.AttachCmdFlags(flag.StringVar, flag.BoolVar)
metricsExporter := metrics.NewExporter(metrics.DefaultMetricNamespace)
metricsExporter.Options().AttachCmdFlags(flag.StringVar, flag.BoolVar)
flag.Parse()
if *runtimeVersion {
fmt.Println(version.Version())
os.Exit(0)
}
if *buildInfo {
fmt.Printf("Version: %s\nGit Commit: %s\nGit Version: %s\n", version.Version(), version.Commit(), version.GitVersion())
os.Exit(0)
}
if *waitCommand {
waitUntilDaprOutboundReady(*daprHTTPPort)
os.Exit(0)
}
if *appID == "" {
return nil, errors.New("app-id parameter cannot be empty")
}
// Apply options to all loggers
loggerOptions.SetAppID(*appID)
if err := logger.ApplyOptionsToLoggers(&loggerOptions); err != nil {
return nil, err
}
log.Infof("starting Dapr Runtime -- version %s -- commit %s", version.Version(), version.Commit())
log.Infof("log level set to: %s", loggerOptions.OutputLevel)
// Initialize dapr metrics exporter
if err := metricsExporter.Init(); err != nil {
log.Fatal(err)
}
daprHTTP, err := strconv.Atoi(*daprHTTPPort)
if err != nil {
return nil, errors.Wrap(err, "error parsing dapr-http-port flag")
}
daprAPIGRPC, err := strconv.Atoi(*daprAPIGRPCPort)
if err != nil {
return nil, errors.Wrap(err, "error parsing dapr-grpc-port flag")
}
profPort, err := strconv.Atoi(*profilePort)
if err != nil {
return nil, errors.Wrap(err, "error parsing profile-port flag")
}
var daprInternalGRPC int
if *daprInternalGRPCPort != "" {
daprInternalGRPC, err = strconv.Atoi(*daprInternalGRPCPort)
if err != nil {
return nil, errors.Wrap(err, "error parsing dapr-internal-grpc-port")
}
} else {
daprInternalGRPC, err = grpc.GetFreePort()
if err != nil {
return nil, errors.Wrap(err, "failed to get free port for internal grpc server")
}
}
var publicPort *int
if *daprPublicPort != "" {
port, cerr := strconv.Atoi(*daprPublicPort)
if cerr != nil {
return nil, errors.Wrap(cerr, "error parsing dapr-public-port")
}
publicPort = &port
}
var applicationPort int
if *appPort != "" {
applicationPort, err = strconv.Atoi(*appPort)
if err != nil {
return nil, errors.Wrap(err, "error parsing app-port")
}
}
var maxRequestBodySize int
if *daprHTTPMaxRequestSize != -1 {
maxRequestBodySize = *daprHTTPMaxRequestSize
} else {
maxRequestBodySize = DefaultMaxRequestBodySize
}
var readBufferSize int
if *daprHTTPReadBufferSize != -1 {
readBufferSize = *daprHTTPReadBufferSize
} else {
readBufferSize = DefaultReadBufferSize
}
var gracefulShutdownDuration time.Duration
if *daprGracefulShutdownSeconds == -1 {
gracefulShutdownDuration = defaultGracefulShutdownDuration
} else {
gracefulShutdownDuration = time.Duration(*daprGracefulShutdownSeconds) * time.Second
}
placementAddresses := []string{}
if *placementServiceHostAddr != "" {
placementAddresses = parsePlacementAddr(*placementServiceHostAddr)
}
var concurrency int
if *appMaxConcurrency != -1 {
concurrency = *appMaxConcurrency
}
appPrtcl := string(HTTPProtocol)
if *appProtocol != string(HTTPProtocol) {
appPrtcl = *appProtocol
}
daprAPIListenAddressList := strings.Split(*daprAPIListenAddresses, ",")
if len(daprAPIListenAddressList) == 0 {
daprAPIListenAddressList = []string{DefaultAPIListenAddress}
}
runtimeConfig := NewRuntimeConfig(*appID, placementAddresses, *controlPlaneAddress, *allowedOrigins, *config, *componentsPath,
appPrtcl, *mode, daprHTTP, daprInternalGRPC, daprAPIGRPC, daprAPIListenAddressList, publicPort, applicationPort, profPort, *enableProfiling, concurrency, *enableMTLS, *sentryAddress, *appSSL, maxRequestBodySize, *unixDomainSocket, readBufferSize, *daprHTTPStreamRequestBody, gracefulShutdownDuration, *enableAPILogging)
// set environment variables
// TODO - consider adding host address to runtime config and/or caching result in utils package
host, err := utils.GetHostAddress()
if err != nil {
log.Warnf("failed to get host address, env variable %s will not be set", env.HostAddress)
}
variables := map[string]string{
env.AppID: *appID,
env.AppPort: *appPort,
env.HostAddress: host,
env.DaprPort: strconv.Itoa(daprInternalGRPC),
env.DaprGRPCPort: *daprAPIGRPCPort,
env.DaprHTTPPort: *daprHTTPPort,
env.DaprMetricsPort: metricsExporter.Options().Port, // TODO - consider adding to runtime config
env.DaprProfilePort: *profilePort,
}
if err = setEnvVariables(variables); err != nil {
return nil, err
}
var globalConfig *global_config.Configuration
var configErr error
if *enableMTLS || *mode == string(modes.KubernetesMode) {
runtimeConfig.CertChain, err = security.GetCertChain()
if err != nil {
return nil, err
}
}
// Config and resiliency need the operator client, only initiate once and only if we will actually use it.
var operatorClient operator_v1.OperatorClient
if *mode == string(modes.KubernetesMode) && *config != "" {
log.Infof("Initializing the operator client (config: %s)", *config)
client, conn, clientErr := client.GetOperatorClient(*controlPlaneAddress, security.TLSServerName, runtimeConfig.CertChain)
if clientErr != nil {
return nil, clientErr
}
defer conn.Close()
operatorClient = client
}
var accessControlList *global_config.AccessControlList
var namespace string
var podName string
if *config != "" {
switch modes.DaprMode(*mode) {
case modes.KubernetesMode:
namespace = os.Getenv("NAMESPACE")
podName = os.Getenv("POD_NAME")
globalConfig, configErr = global_config.LoadKubernetesConfiguration(*config, namespace, podName, operatorClient)
case modes.StandaloneMode:
globalConfig, _, configErr = global_config.LoadStandaloneConfiguration(*config)
}
}
if configErr != nil {
log.Fatalf("error loading configuration: %s", configErr)
}
if globalConfig == nil {
log.Info("loading default configuration")
globalConfig = global_config.LoadDefaultConfiguration()
}
features := globalConfig.Spec.Features
resiliencyEnabled := global_config.IsFeatureEnabled(features, global_config.Resiliency)
var resiliencyProvider resiliency_config.Provider
if resiliencyEnabled {
var resiliencyConfigs []*resiliency_v1alpha.Resiliency
switch modes.DaprMode(*mode) {
case modes.KubernetesMode:
namespace = os.Getenv("NAMESPACE")
resiliencyConfigs = resiliency_config.LoadKubernetesResiliency(log, *appID, namespace, operatorClient)
case modes.StandaloneMode:
resiliencyConfigs = resiliency_config.LoadStandaloneResiliency(log, *appID, *componentsPath)
}
log.Debugf("Found %d resiliency configurations.", len(resiliencyConfigs))
resiliencyProvider = resiliency_config.FromConfigurations(log, resiliencyConfigs...)
} else {
log.Debug("Resiliency is not enabled.")
resiliencyProvider = &resiliency_config.NoOp{}
}
accessControlList, err = acl.ParseAccessControlSpec(globalConfig.Spec.AccessControlSpec, string(runtimeConfig.ApplicationProtocol))
if err != nil {
log.Fatal(err)
}
return NewDaprRuntime(runtimeConfig, globalConfig, accessControlList, resiliencyProvider), nil
}
func setEnvVariables(variables map[string]string) error {
for key, value := range variables {
err := os.Setenv(key, value)
if err != nil {
return err
}
}
return nil
}
func parsePlacementAddr(val string) []string {
parsed := []string{}
p := strings.Split(val, ",")
for _, addr := range p {
parsed = append(parsed, strings.TrimSpace(addr))
}
return parsed
}
| ["\"NAMESPACE\"", "\"POD_NAME\"", "\"NAMESPACE\""] | [] | ["NAMESPACE", "POD_NAME"] | [] | ["NAMESPACE", "POD_NAME"] | go | 2 | 0 | |
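parsePlacementAddr above trims whitespace around each comma-separated address; a minimal test sketch of that behavior (its placement alongside cli.go is an assumption).

package runtime

import (
	"reflect"
	"testing"
)

func TestParsePlacementAddr(t *testing.T) {
	got := parsePlacementAddr("host1:50005, host2:50005 ,host3:50005")
	want := []string{"host1:50005", "host2:50005", "host3:50005"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("parsePlacementAddr: got %v, want %v", got, want)
	}
}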
test/bdd/bddtests_test.go
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package bdd
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/cucumber/godog"
"github.com/hyperledger/aries-framework-go/pkg/common/log"
"github.com/hyperledger/aries-framework-go/test/bdd/agent"
"github.com/hyperledger/aries-framework-go/test/bdd/dockerutil"
bddctx "github.com/hyperledger/aries-framework-go/test/bdd/pkg/context"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/didexchange"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/didresolver"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/introduce"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/messaging"
"github.com/hyperledger/aries-framework-go/test/bdd/pkg/route"
)
const (
SideTreeURL = "${SIDETREE_URL}"
DIDDocPath = "${DID_DOC_PATH}"
)
var composition []*dockerutil.Composition
var composeFiles = []string{"./fixtures/agent-rest", "./fixtures/sidetree-mock"}
func TestMain(m *testing.M) {
// default is to run all tests with tag @all
tags := "all"
flag.Parse()
format := "progress"
if getCmdArg("test.v") == "true" {
format = "pretty"
}
runArg := getCmdArg("test.run")
if runArg != "" {
tags = runArg
}
agentLogLevel := os.Getenv("AGENT_LOG_LEVEL")
if agentLogLevel != "" {
logLevel, err := log.ParseLevel(agentLogLevel)
if err != nil {
panic(err)
}
log.SetLevel(os.Getenv("AGENT_LOG_MODULE"), logLevel)
}
status := runBddTests(tags, format)
if st := m.Run(); st > status {
status = st
}
os.Exit(status)
}
//nolint:gocognit
func runBddTests(tags, format string) int {
return godog.RunWithOptions("godogs", func(s *godog.Suite) {
s.BeforeSuite(func() {
if os.Getenv("DISABLE_COMPOSITION") != "true" {
// Need a unique name, but docker does not allow '-' in names
composeProjectName := strings.ReplaceAll(generateUUID(), "-", "")
for _, v := range composeFiles {
newComposition, err := dockerutil.NewComposition(composeProjectName, "docker-compose.yml", v)
if err != nil {
panic(fmt.Sprintf("Error composing system in BDD context: %s", err))
}
composition = append(composition, newComposition)
}
fmt.Println("docker-compose up ... waiting for containers to start ...")
testSleep := 5
if os.Getenv("TEST_SLEEP") != "" {
var e error
testSleep, e = strconv.Atoi(os.Getenv("TEST_SLEEP"))
if e != nil {
panic(fmt.Sprintf("Invalid value found in 'TEST_SLEEP': %s", e))
}
}
fmt.Printf("*** testSleep=%d", testSleep)
time.Sleep(time.Second * time.Duration(testSleep))
}
})
s.AfterSuite(func() {
for _, c := range composition {
if c != nil {
if err := c.GenerateLogs(c.Dir, c.ProjectName+".log"); err != nil {
panic(err)
}
if _, err := c.Decompose(c.Dir); err != nil {
panic(err)
}
}
}
})
FeatureContext(s)
}, godog.Options{
Tags: tags,
Format: format,
Paths: []string{"features"},
Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
Strict: true,
StopOnFailure: true,
})
}
func getCmdArg(argName string) string {
cmdTags := flag.CommandLine.Lookup(argName)
if cmdTags != nil && cmdTags.Value != nil && cmdTags.Value.String() != "" {
return cmdTags.Value.String()
}
return ""
}
// generateUUID returns a UUID based on RFC 4122
func generateUUID() string {
id := dockerutil.GenerateBytesUUID()
return fmt.Sprintf("%x-%x-%x-%x-%x", id[0:4], id[4:6], id[6:8], id[8:10], id[10:])
}
func FeatureContext(s *godog.Suite) {
bddContext, err := bddctx.NewBDDContext()
if err != nil {
panic(fmt.Sprintf("Error returned from NewBDDContext: %s", err))
}
// set dynamic args
bddContext.Args[SideTreeURL] = "http://localhost:48326/document"
bddContext.Args[DIDDocPath] = "fixtures/sidetree-mock/config/didDocument.json"
// Context is shared between tests
agent.NewSDKSteps(bddContext).RegisterSteps(s)
agent.NewControllerSteps(bddContext).RegisterSteps(s)
// Register did exchange tests
didexchange.NewDIDExchangeSDKSteps(bddContext).RegisterSteps(s)
didexchange.NewDIDExchangeControllerSteps(bddContext).RegisterSteps(s)
// Register introduce tests
introduce.NewIntroduceSDKSteps(bddContext).RegisterSteps(s)
// Register did resolver tests
didresolver.NewDIDResolverSteps(bddContext).RegisterSteps(s)
// Register messaging tests
messaging.NewMessagingSDKSteps(bddContext).RegisterSteps(s)
messaging.NewMessagingControllerSteps(bddContext).RegisterSteps(s)
// Register router tests
route.NewRouteSDKSteps(bddContext).RegisterSteps(s)
}
| ["\"AGENT_LOG_LEVEL\"", "\"AGENT_LOG_MODULE\"", "\"DISABLE_COMPOSITION\"", "\"TEST_SLEEP\"", "\"TEST_SLEEP\""] | [] | ["DISABLE_COMPOSITION", "AGENT_LOG_LEVEL", "TEST_SLEEP", "AGENT_LOG_MODULE"] | [] | ["DISABLE_COMPOSITION", "AGENT_LOG_LEVEL", "TEST_SLEEP", "AGENT_LOG_MODULE"] | go | 4 | 0 | |
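The TEST_SLEEP handling above (start from a default, override when the variable parses) is a recurring pattern; a small helper sketch packages it. Unlike the suite above, which panics on a bad value, this version falls back to the default, and the names are illustrative.

package bdd

import (
	"os"
	"strconv"
)

// getenvInt returns the integer value of the named variable, or def when
// it is unset or does not parse.
func getenvInt(name string, def int) int {
	raw := os.Getenv(name)
	if raw == "" {
		return def
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		return def
	}
	return n
}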
main.go
|
package main
import (
"database/sql"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/chiswicked/go-grpc-crud-server-boilerplate/api"
"github.com/chiswicked/go-grpc-crud-server-boilerplate/errs"
"github.com/chiswicked/go-grpc-crud-server-boilerplate/service"
_ "github.com/lib/pq"
"golang.org/x/net/context"
)
const (
grpcAddr = ":8090"
gwAddr = ":8080"
pgHost = "localhost"
pgPort = "5432"
pgUsername = "testusername"
pgPassword = "testpassword"
pgDatabase = "testdatabase"
pgSSLmode = "disable"
)
func main() {
fmt.Println(startMsg(os.Getenv("APP")))
var err error
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
db, err := createDbConn()
errs.FatalIf("PostgreSQL connection error", err)
defer db.Close()
err = db.Ping()
errs.PanicIf("PostgreSQL ping error", err)
srv := api.CreateAPI(db)
lsnr := service.StartTCPListener(grpcAddr)
grpcServer := service.InitGRPCServer(srv)
go service.StartGRPCServer(grpcServer, lsnr)
defer grpcServer.GracefulStop()
gwServer := service.InitGRPCGatewayServer(ctx, grpcAddr, gwAddr)
go service.StartGRPCGatewayServer(gwServer)
defer gwServer.Shutdown(ctx)
waitForShutdown()
}
func createDbConn() (*sql.DB, error) {
connStr := fmt.Sprintf(
"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s connect_timeout=60",
pgHost,
pgPort,
pgUsername,
pgPassword,
pgDatabase,
pgSSLmode,
)
return sql.Open("postgres", connStr)
}
func startMsg(app string) string {
return fmt.Sprintf("Initializing %v server", app)
}
func waitForShutdown() {
s := make(chan os.Signal, 1)
signal.Notify(s, syscall.SIGINT, syscall.SIGTERM)
<-s
log.Printf("Shutting down %s", os.Getenv("APP"))
}
| ["\"APP\"", "\"APP\""] | [] | ["APP"] | [] | ["APP"] | go | 1 | 0 | |
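The Postgres settings above are hardcoded constants while only APP comes from the environment. Since this dataset tracks environment lookups, here is a hedged sketch of sourcing the same values from the environment with the constants as fallbacks; variable names such as PG_HOST are assumptions, not read by the original file.

package dbconfig

import "os"

func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

// pgConnParams mirrors the constants above but lets the environment
// override each value, e.g. PG_HOST=db.internal.
func pgConnParams() (host, port, user, pass, db string) {
	return getenvDefault("PG_HOST", "localhost"),
		getenvDefault("PG_PORT", "5432"),
		getenvDefault("PG_USER", "testusername"),
		getenvDefault("PG_PASSWORD", "testpassword"),
		getenvDefault("PG_DATABASE", "testdatabase")
}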
examples/api_with_mongo/main.go
|
package main
import (
"context"
"encoding/json"
"log"
"net/http"
"os"
"time"
"github.com/gorilla/mux"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Config struct {
MongoUrl string
DatabaseName string
}
func NewConfig() *Config {
return &Config{
MongoUrl: os.Getenv("MONGO_URL"),
DatabaseName: os.Getenv("DATABASE_NAME"),
}
}
func ExampleHandler(w http.ResponseWriter, r *http.Request) {
post := mux.Vars(r)
config := NewConfig()
client, err := NewMongoClient(config.MongoUrl)
if err != nil {
w.WriteHeader(500)
return
}
collection := client.Database(config.DatabaseName).Collection("datasets")
var result map[string]interface{}
err = collection.FindOne(context.Background(), bson.D{{"id", post["id"]}}).Decode(&result)
if err != nil {
w.WriteHeader(404)
return
}
w.Header().Add("Content-Type", "application/json")
resultBody, err := json.Marshal(result)
if err != nil {
w.WriteHeader(500)
return
}
w.Write(resultBody)
}
func newRouter() http.Handler {
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/datasets/{id}", ExampleHandler).Methods("GET")
return router
}
func NewServer() *http.Server {
return &http.Server{
Handler: newRouter(),
}
}
func NewMongoClient(mongoUrl string) (*mongo.Client, error) {
// use the URL passed in rather than re-reading the config
client, err := mongo.NewClient(options.Client().ApplyURI(mongoUrl))
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
if err := client.Connect(ctx); err != nil {
return nil, err
}
return client, nil
}
func main() {
server := NewServer()
log.Fatal(http.ListenAndServe(":10000", server.Handler))
}
| ["\"MONGO_URL\"", "\"DATABASE_NAME\""] | [] | ["MONGO_URL", "DATABASE_NAME"] | [] | ["MONGO_URL", "DATABASE_NAME"] | go | 2 | 0 | |
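ExampleHandler above dials a new Mongo client on every request. A common alternative is one shared client guarded by sync.Once; this sketch uses the same mongo-driver API as the file above, and the helper names are assumptions.

package mongoutil

import (
	"context"
	"sync"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

var (
	clientOnce   sync.Once
	sharedClient *mongo.Client
	clientErr    error
)

// SharedMongoClient connects once and reuses the client, which is safe
// for concurrent use, instead of dialing per request.
func SharedMongoClient(mongoUrl string) (*mongo.Client, error) {
	clientOnce.Do(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
		defer cancel()
		sharedClient, clientErr = mongo.Connect(ctx, options.Client().ApplyURI(mongoUrl))
	})
	return sharedClient, clientErr
}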
independent-projects/bootstrap/runner/src/main/java/io/quarkus/bootstrap/runner/QuarkusEntryPoint.java
|
package io.quarkus.bootstrap.runner;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLDecoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
public class QuarkusEntryPoint {
public static final String QUARKUS_APPLICATION_DAT = "quarkus/quarkus-application.dat";
public static void main(String... args) throws Throwable {
System.setProperty("java.util.logging.manager", org.jboss.logmanager.LogManager.class.getName());
Timing.staticInitStarted();
doRun(args);
}
private static void doRun(Object args) throws IOException, ClassNotFoundException, IllegalAccessException,
InvocationTargetException, NoSuchMethodException {
String path = QuarkusEntryPoint.class.getProtectionDomain().getCodeSource().getLocation().getPath();
String decodedPath = URLDecoder.decode(path, "UTF-8");
Path appRoot = new File(decodedPath).toPath().getParent().getParent().getParent();
if (Boolean.parseBoolean(System.getenv("QUARKUS_LAUNCH_DEVMODE"))) {
DevModeMediator.doDevMode(appRoot);
} else if (Boolean.getBoolean("quarkus.launch.rebuild")) {
doReaugment(appRoot);
} else {
SerializedApplication app = null;
try (InputStream in = Files.newInputStream(appRoot.resolve(QUARKUS_APPLICATION_DAT))) {
app = SerializedApplication.read(in, appRoot);
Thread.currentThread().setContextClassLoader(app.getRunnerClassLoader());
Class<?> mainClass = app.getRunnerClassLoader().loadClass(app.getMainClass());
mainClass.getMethod("main", String[].class).invoke(null, args);
} finally {
if (app != null) {
app.getRunnerClassLoader().close();
}
}
}
}
private static void doReaugment(Path appRoot) throws IOException, ClassNotFoundException, IllegalAccessException,
InvocationTargetException, NoSuchMethodException {
try (ObjectInputStream in = new ObjectInputStream(
Files.newInputStream(appRoot.resolve("lib/deployment/deployment-class-path.dat")))) {
List<String> paths = (List<String>) in.readObject();
//yuck, should use runner class loader
URLClassLoader loader = new URLClassLoader(paths.stream().map((s) -> {
try {
return appRoot.resolve(s).toUri().toURL();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}).toArray(URL[]::new));
try {
loader.loadClass("io.quarkus.deployment.mutability.ReaugmentTask")
.getDeclaredMethod("main", Path.class).invoke(null, appRoot);
} finally {
loader.close();
}
}
}
}
| ["\"QUARKUS_LAUNCH_DEVMODE\""] | [] | ["QUARKUS_LAUNCH_DEVMODE"] | [] | ["QUARKUS_LAUNCH_DEVMODE"] | java | 1 | 0 |
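QuarkusEntryPoint above branches on Boolean.parseBoolean(System.getenv("QUARKUS_LAUNCH_DEVMODE")), which treats anything other than "true" (case-insensitive) as false. For contrast with the Go files in this set, a sketch of the equivalent check in Go; strconv.ParseBool is stricter and also accepts 1/0 forms.

package envutil

import (
	"os"
	"strconv"
)

// envFlag reports whether the named variable is set to a truthy value
// such as "true" or "1"; unset or unparsable values count as false.
func envFlag(name string) bool {
	v, err := strconv.ParseBool(os.Getenv(name))
	return err == nil && v
}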