Dataset columns:
filename: string (lengths 4 to 198)
content: string (lengths 25 to 939k)
environment: list
variablearg: list
constarg: list
variableargjson: string (1 value)
constargjson: string (lengths 2 to 3.9k)
lang: string (3 values)
constargcount: float64 (0 to 129, with nulls)
variableargcount: float64 (0 to 0, with nulls)
sentence: string (1 value)
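The environment column keeps the env var references as they appear in the source (quoted literals), while constarg and constargjson hold the bare variable names. Below is a minimal sketch of decoding one row's metadata in Go, assuming the rows are exported as JSON objects keyed by the column names above; the struct and the sample record are illustrative, not part of the dataset:

package main

import (
	"encoding/json"
	"fmt"
)

// row mirrors the columns described above; only the fields used here are included.
// The JSON keys assume the dataset is exported with the column names as field names.
type row struct {
	Filename      string   `json:"filename"`
	Lang          string   `json:"lang"`
	Environment   []string `json:"environment"` // quoted literals as they appear in the source
	ConstArg      []string `json:"constarg"`    // bare environment variable names
	ConstArgCount float64  `json:"constargcount"`
}

func main() {
	// Sample record modeled on the tot/chrome.go row below (illustrative only).
	data := []byte(`{"filename":"tot/chrome.go","lang":"go",
		"environment":["\"LOG_LEVEL\""],"constarg":["LOG_LEVEL"],"constargcount":1}`)

	var r row
	if err := json.Unmarshal(data, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s) reads %d env var(s): %v\n",
		r.Filename, r.Lang, int(r.ConstArgCount), r.ConstArg)
}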

filename: libzkchannels_test.go
content:
package libzkchannels
import (
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
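// WriteToFile writes data to the named file and syncs it to disk; it is a no-op when filename is empty.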
func WriteToFile(filename string, data string) error {
if filename == "" {
return nil
}
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
_, err = io.WriteString(file, data)
if err != nil {
return err
}
return file.Sync()
}
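// MerchantGenerateCustClaimTx signs the merchant's claim of the to_merchant output at the given txid
// (minus a fixed fee) and, on success, writes the signed transaction to targetTxFile.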
func MerchantGenerateCustClaimTx(txid string, ToMerchantAmount int64, merchState MerchState, targetTxFile string) {
// Merchant claim tx to_merchant output from cust-close-from-escrow-tx (spendable immediately)
outputPk2 := "03af0530f244a154b278b34de709b84bb85bb39ff3f1302fc51ae275e5a45fb353"
txFee := int64(1000)
claimAmount := ToMerchantAmount - txFee // with some fee
SignedMerchClaimTx, err := MerchantSignCustClaimTx(txid, uint32(1), ToMerchantAmount, claimAmount, outputPk2, merchState)
if err == nil {
WriteToFile(targetTxFile, SignedMerchClaimTx)
}
return
}
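// Test_fullProtocolWithValidUTXO exercises the full zkChannels flow end to end: escrow funding,
// channel activation, an MPC payment, and the various close, claim, dispute and mutual-close
// transactions, optionally writing the signed transactions to disk.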
func Test_fullProtocolWithValidUTXO(t *testing.T) {
dbUrl := "redis://127.0.0.1/"
valCpfp := int64(1000)
minThreshold := int64(546)
selfDelay := int16(1487) // used to be 1487
self_delay := os.Getenv("TIMELOCK")
if self_delay != "" {
to_self_delay, err := strconv.ParseUint(self_delay, 10, 16)
assert.Nil(t, err)
selfDelay = int16(to_self_delay)
fmt.Println("Using timelock: ", selfDelay)
}
txFeeInfo := TransactionFeeInfo{
BalMinCust: minThreshold,
BalMinMerch: minThreshold,
ValCpFp: valCpfp,
FeeCC: 1000,
FeeMC: 1000,
MinFee: 0,
MaxFee: 10000,
}
feeCC := txFeeInfo.FeeCC
feeMC := txFeeInfo.FeeMC
channelState, err := ChannelSetup("channel", selfDelay, txFeeInfo.BalMinCust, txFeeInfo.BalMinMerch, txFeeInfo.ValCpFp, false)
assert.Nil(t, err)
channelState, merchState, err := InitMerchant(dbUrl, channelState, "merch")
assert.Nil(t, err)
skM := "e6e0c5310bb03809e1b2a1595a349f002125fa557d481e51f401ddaf3287e6ae"
payoutSkM := "5611111111111111111111111111111100000000000000000000000000000000"
childSkM := "5811111111111111111111111111111100000000000000000000000000000000"
disputeSkM := "5711111111111111111111111111111100000000000000000000000000000000"
channelState, merchState, err = LoadMerchantWallet(merchState, channelState, skM, payoutSkM, childSkM, disputeSkM)
assert.Nil(t, err)
custBal := int64(1000000)
merchBal := int64(1000000)
merchPKM := fmt.Sprintf("%v", *merchState.PkM)
channelToken, custState, err := InitCustomer(merchPKM, custBal, merchBal, txFeeInfo, "cust")
assert.Nil(t, err)
fix_customer_wallet := os.Getenv("FIX_CUSTOMER_WALLET")
if fix_customer_wallet == "yes" {
fmt.Println("Loading an external wallet...")
skC := "1a1971e1379beec67178509e25b6772c66cb67bb04d70df2b4bcdb8c08a01827"
payoutSk := "4157697b6428532758a9d0f9a73ce58befe3fd665797427d1c5bb3d33f6a132e"
channelToken, custState, err = LoadCustomerWallet(custState, channelToken, skC, payoutSk)
assert.Nil(t, err)
}
// inputSats := int64(50 * 100000000)
inputSats := int64(100000000) // when using make_n_utxo.py
fmt.Println("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
cust_utxo_txid := os.Getenv("UTXO_TXID")
if cust_utxo_txid == "" {
fmt.Println("Using a dummy UTXO_TXID instead.")
cust_utxo_txid = "e8aed42b9f07c74a3ce31a9417146dc61eb8611a1e66d345fd69be06b644278d"
}
fmt.Println("Using UTXO txid: ", cust_utxo_txid)
utxo_index := os.Getenv("UTXO_INDEX")
cust_utxo_index := uint32(0)
if utxo_index != "" {
idx, err := strconv.ParseUint(utxo_index, 10, 32)
assert.Nil(t, err)
cust_utxo_index = uint32(idx)
}
fmt.Println("Using UTXO index: ", cust_utxo_index)
csk := os.Getenv("UTXO_SK")
if csk == "" {
csk = fmt.Sprintf("%v", "5511111111111111111111111111111100000000000000000000000000000000")
}
custInputSk := csk
fmt.Println("Using custInputSk: ", custInputSk)
fmt.Println("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
// files to write to
EscrowTxFile := ""
MerchCloseTxFile := ""
FirstCustCloseEscrowTxFile := ""
MerchClaimViaFirstCustCloseEscrowTxFile := ""
FirstCustCloseMerchTxFile := ""
MerchClaimViaFirstCustCloseMerchTxFile := ""
CustCloseEscrowTxFile := ""
CustCloseFromMerchTxFile := ""
CustClaimFromCustCloseEscrowTxFile := ""
CustClaimFromCustCloseMerchTxFile := ""
MerchClaimFromEscrowTxFile := ""
MerchClaimFromMerchTxFile := ""
MerchDisputeFirstCustCloseTxFile := ""
MerchDisputeFirstCustCloseFromMerchTxFile := ""
MerchClaimFromMerchCloseTxFile := ""
MutualCloseTxFile := ""
// SignSeparateClaimChildOutputTxFile := ""
SignBumpFeeChildTxFile := ""
save_tx_file := os.Getenv("UTXO_SAVE_TX")
if save_tx_file == "yes" {
index := cust_utxo_index
// stores the escrow-tx
EscrowTxFile = fmt.Sprintf("signed_escrow_%d.txt", index)
// stores the merch-close-tx (unilateral close for merchant)
MerchCloseTxFile = fmt.Sprintf("signed_merch_close_%d.txt", index)
// stores first cust-close-from-escrow-tx (old state)
FirstCustCloseEscrowTxFile = fmt.Sprintf("signed_first_cust_close_escrow_tx_%d.txt", index)
// stores merch claim to_merchant in first cust-close-from-escrow-tx (immediately)
MerchClaimViaFirstCustCloseEscrowTxFile = fmt.Sprintf("signed_merch_claim_first_close_escrow_tx_%d.txt", index)
// stores first cust-close-from-merch-close-tx (old state)
FirstCustCloseMerchTxFile = fmt.Sprintf("signed_first_cust_close_merch_tx_%d.txt", index)
// stores merch claim to_merchant in first cust-close-from-merch-close-tx (immediately)
MerchClaimViaFirstCustCloseMerchTxFile = fmt.Sprintf("signed_merch_claim_first_close_merch_tx_%d.txt", index)
// stores cust-close-from-escrow-tx (current state)
CustCloseEscrowTxFile = fmt.Sprintf("signed_cust_close_escrow_tx_%d.txt", index)
// stores cust-close-from-merch-close-tx (current state)
CustCloseFromMerchTxFile = fmt.Sprintf("signed_cust_close_merch_tx_%d.txt", index)
// stores to_customer claim tx for cust-close-from-escrow-tx (after timelock)
CustClaimFromCustCloseEscrowTxFile = fmt.Sprintf("signed_cust_claim_escrow_tx_%d.txt", index)
// stores to_customer claim tx for cust-close-from-merch-close-tx (after timelock)
CustClaimFromCustCloseMerchTxFile = fmt.Sprintf("signed_cust_claim_merch_tx_%d.txt", index)
// stores to_merchant claim tx for cust-close-from-escrow-tx (immediately)
MerchClaimFromEscrowTxFile = fmt.Sprintf("signed_merch_claim_escrow_tx_%d.txt", index)
// stores to_merchant claim tx for cust-close-from-merch-close-tx (immediately)
MerchClaimFromMerchTxFile = fmt.Sprintf("signed_merch_claim_merch_tx_%d.txt", index)
// stores merch_dispute tx for cust-close-from-escrow-tx (old state)
MerchDisputeFirstCustCloseTxFile = fmt.Sprintf("signed_dispute_from_escrow_tx_%d.txt", index)
// stores merch_dispute tx for cust-close-from-merch-close-tx (old state)
MerchDisputeFirstCustCloseFromMerchTxFile = fmt.Sprintf("signed_dispute_from_merch_tx_%d.txt", index)
// stores merch claim tx for full balance in merch-close-tx (after timelock)
MerchClaimFromMerchCloseTxFile = fmt.Sprintf("signed_merch_claim_merch_close_tx_%d.txt", index)
// stores mutual close tx for most recent balance of the channel
MutualCloseTxFile = fmt.Sprintf("signed_mutual_close_tx_%d.txt", index)
// store child tx that bumps fee via cpfp + p2wpkh utxo input
SignBumpFeeChildTxFile = fmt.Sprintf("signed_bump_fee_child_tx_p2wpkh_%d.txt", index)
}
custSk := fmt.Sprintf("%v", custState.SkC)
custPk := fmt.Sprintf("%v", custState.PkC)
merchSk := fmt.Sprintf("%v", *merchState.SkM)
merchPk := fmt.Sprintf("%v", *merchState.PkM)
changeSk := "8c5f4b5be9b71eb9c93e9e805b39d445b8fc6c5f8bf6ebecedef9a45ee150b44"
changePk := "0376dbe15da5257bfc94c37a8af793e022f01a6d981263a73defe292a564c691d2"
merchClosePk := fmt.Sprintf("%v", *merchState.PayoutPk)
merchChildPk := fmt.Sprintf("%v", *merchState.ChildPk)
merchDispPk := fmt.Sprintf("%v", *merchState.DisputePk)
// toSelfDelay := "05cf" // 1487 blocks
toSelfDelay, err := GetSelfDelayBE(channelState)
assert.Nil(t, err)
fmt.Println("toSelfDelay (BE) :=> ", toSelfDelay)
fmt.Println("custSk :=> ", custSk)
fmt.Println("custPk :=> ", custPk)
fmt.Println("merchSk :=> ", merchSk)
fmt.Println("merchPk :=> ", merchPk)
fmt.Println("merchClosePk :=> ", merchClosePk)
fmt.Println("merchChildPk :=> ", merchChildPk)
outputSats := custBal + merchBal
txFee := int64(500)
signedEscrowTx, escrowTxid_BE, escrowTxid_LE, escrowPrevout, err := SignEscrowTx(cust_utxo_txid, cust_utxo_index, custInputSk, inputSats, outputSats, custPk, merchPk, changePk, false, txFee)
WriteToFile(EscrowTxFile, signedEscrowTx)
assert.Nil(t, err)
fmt.Println("========================================")
fmt.Println("escrow txid (LE) => ", escrowTxid_LE)
fmt.Println("escrow txid (BE) => ", escrowTxid_BE)
fmt.Println("escrow prevout => ", escrowPrevout)
fmt.Println("TX1: signedEscrowTx => ", signedEscrowTx)
fmt.Println("========================================")
merchTxPreimage, err := FormMerchCloseTx(escrowTxid_LE, custPk, merchPk, merchClosePk, merchChildPk, custBal, merchBal, feeMC, txFeeInfo.ValCpFp, toSelfDelay)
assert.Nil(t, err)
fmt.Println("merch TxPreimage => ", merchTxPreimage)
custSig, err := CustomerSignMerchCloseTx(custSk, merchTxPreimage)
fmt.Println("cust sig for merchCloseTx => ", custSig)
assert.Nil(t, err)
isOk, merchTxid_BE, merchTxid_LE, merchPrevout, merchState, err := MerchantVerifyMerchCloseTx(escrowTxid_LE, custPk, custBal, merchBal, feeMC, txFeeInfo.ValCpFp, toSelfDelay, custSig, merchState)
fmt.Println("orig merch txid (BE) = ", merchTxid_BE)
fmt.Println("orig merch txid (LE) = ", merchTxid_LE)
fmt.Println("orig merch prevout = ", merchPrevout)
if !isOk {
t.Error("FAILED to verify the cust signature on merch-close-tx", err)
return
}
txInfo := FundingTxInfo{
EscrowTxId: escrowTxid_BE, // big-endian
EscrowPrevout: escrowPrevout, // big-endian
MerchTxId: merchTxid_BE, // big-endian
MerchPrevout: merchPrevout, // big-endian
InitCustBal: custBal,
InitMerchBal: merchBal,
}
fmt.Println("RevLock => ", custState.RevLock)
custCloseSk := fmt.Sprintf("%v", custState.PayoutSk)
custClosePk := custState.PayoutPk
escrowSig, merchSig, err := MerchantSignInitCustCloseTx(txInfo, custState.RevLock, custState.PkC, custClosePk, toSelfDelay, merchState, feeCC, feeMC, valCpfp)
assert.Nil(t, err)
fmt.Println("escrow sig: ", escrowSig)
fmt.Println("merch sig: ", merchSig)
isOk, channelToken, custState, err = CustomerVerifyInitCustCloseTx(txInfo, txFeeInfo, channelState, channelToken, escrowSig, merchSig, custState)
if !isOk {
t.Error("FAILED to verify the merch signatures on init cust-close-tx", err)
return
}
assert.Nil(t, err)
initCustState, initHash, err := CustomerGetInitialState(custState)
assert.Nil(t, err)
fmt.Println("initial cust state: ", initCustState)
fmt.Println("initial hash: ", initHash)
isOk, merchState, err = MerchantValidateInitialState(channelToken, initCustState, initHash, merchState)
assert.Nil(t, err)
fmt.Println("merchant validates initial state: ", isOk)
if !isOk {
t.Error("error: ", err)
}
fmt.Println("initial close transactions validated: ", isOk)
_, err = CustomerChangeChannelStatusToPendingClose(custState)
assert.Equal(t, "transition not allowed for channel: PendingOpen => PendingClose", err.Error())
_, err = CustomerChangeChannelStatusToConfirmedClose(custState)
assert.Equal(t, "transition not allowed for channel: PendingOpen => ConfirmedClose", err.Error())
custState, err = CustomerChangeChannelStatusToOpen(custState)
merchState, err = MerchantChangeChannelStatusToOpen(escrowTxid_LE, merchState)
if isOk {
// initiate merch-close-tx
signedMerchCloseTx, merchTxid2_BE, merchTxid2_LE, _, err := ForceMerchantCloseTx(escrowTxid_LE, merchState, txFeeInfo.ValCpFp)
WriteToFile(MerchCloseTxFile, signedMerchCloseTx)
assert.Nil(t, err)
assert.NotNil(t, merchTxid2_BE)
fmt.Println("========================================")
fmt.Println("TX2: Merchant has signed merch close tx => ", signedMerchCloseTx)
fmt.Println("merch txid = ", merchTxid2_LE)
fmt.Println("========================================")
}
fmt.Println("Output initial closing transactions")
CloseEscrowTx, CloseEscrowTxId_LE, custState, err := ForceCustomerCloseTx(channelState, channelToken, true, custState)
assert.Nil(t, err)
initCustBalPayout := custState.CustBalance
WriteToFile(FirstCustCloseEscrowTxFile, CloseEscrowTx)
CloseEscrowTxId_TX3 := CloseEscrowTxId_LE
assert.NotNil(t, CloseEscrowTxId_LE)
fmt.Println("========================================")
fmt.Println("TX3: Close EscrowTx ID (LE): ", CloseEscrowTxId_LE)
fmt.Println("TX3: Close from EscrowTx => ", string(CloseEscrowTx))
fmt.Println("========================================")
MerchantGenerateCustClaimTx(CloseEscrowTxId_TX3, custState.MerchBalance, merchState, MerchClaimViaFirstCustCloseEscrowTxFile)
CloseMerchTx, CloseMerchTxId_LE, custState, err := ForceCustomerCloseTx(channelState, channelToken, false, custState)
custState.ChannelStatus = "Open" // set back to Open for remaining tests (bypasses API with appropriate checks)
WriteToFile(FirstCustCloseMerchTxFile, CloseMerchTx)
assert.NotNil(t, CloseMerchTxId_LE)
CloseMerchTxId_TX3 := CloseMerchTxId_LE
fmt.Println("TX4: Close MerchTx ID (LE): ", CloseMerchTxId_LE)
fmt.Println("TX4: Close from MerchCloseTx => ", string(CloseMerchTx))
{
inputAmount0 := custState.MerchBalance - feeMC - valCpfp
MerchantGenerateCustClaimTx(CloseMerchTxId_LE, inputAmount0, merchState, MerchClaimViaFirstCustCloseMerchTxFile)
}
/////////////////////////////////////////////////////////
fmt.Println("Proceed with channel activation...")
(*merchState.ChannelStatusMap)[escrowTxid_BE] = "Open"
channelId, err := GetChannelId(channelToken)
assert.Nil(t, err)
fmt.Println("Channel ID: ", channelId)
state, custState, err := ActivateCustomer(custState)
assert.Nil(t, err)
payToken0, merchState, err := ActivateMerchant(channelToken, state, merchState)
assert.Nil(t, err)
custState, err = ActivateCustomerFinalize(payToken0, custState)
assert.Nil(t, err)
fmt.Println("channel activated...")
// unlink should happen at this point (0-value payment)
fmt.Println("proceed with pay protocol...")
revState, newState, revLockCom, sessionId, custState, err := PreparePaymentCustomer(channelState, 10, custState)
assert.Nil(t, err)
fmt.Println("New session ID: ", sessionId)
assert.NotNil(t, revState)
assert.NotNil(t, newState)
assert.NotNil(t, channelState)
assert.NotNil(t, custState)
fmt.Println("Nonce: ", state.Nonce)
fmt.Println("RevLockCom: ", revLockCom)
justification := ""
payTokenMaskCom, merchState, err := PreparePaymentMerchant(channelState, sessionId, state.Nonce, revLockCom, 10, justification, merchState)
assert.Nil(t, err)
go runPayCust(channelState, channelToken, state, newState, payTokenMaskCom, revLockCom, custState)
isOk, merchState, err = PayUpdateMerchant(channelState, sessionId, payTokenMaskCom, merchState, nil, nil, nil)
assert.Nil(t, err)
time.Sleep(time.Second * 5)
if !isOk {
t.Error("MPC execution failed for merchant!", err)
}
success := os.Getenv("successString")
success = strings.Trim(success, " ")
assert.True(t, isOk)
maskedTxInputs, err := PayConfirmMPCResult(sessionId, success, merchState)
assert.Nil(t, err)
serCustState := os.Getenv("custStateRet")
err = json.Unmarshal([]byte(serCustState), &custState)
assert.Nil(t, err)
isOk, custState, err = PayUnmaskSigsCustomer(channelState, channelToken, maskedTxInputs, custState)
assert.Nil(t, err)
assert.True(t, isOk)
payTokenMask, payTokenMaskR, merchState, err := PayValidateRevLockMerchant(sessionId, revState, merchState)
assert.Nil(t, err)
isOk, custState, err = PayUnmaskPayTokenCustomer(payTokenMask, payTokenMaskR, custState)
assert.Nil(t, err)
assert.True(t, isOk)
// Customer initiates close and generates cust-close-from-escrow-tx
fmt.Println("Get new signed close transactions...")
CloseEscrowTx, CloseEscrowTxId_LE, custState, err = ForceCustomerCloseTx(channelState, channelToken, true, custState)
WriteToFile(CustCloseEscrowTxFile, CloseEscrowTx)
assert.Nil(t, err)
assert.NotNil(t, CloseEscrowTxId_LE)
fmt.Println("TX5: Close EscrowTx ID (LE): ", CloseEscrowTxId_LE)
fmt.Println("TX5: Close from EscrowTx => ", string(CloseEscrowTx))
_, err = CustomerChangeChannelStatusToConfirmedClose(custState)
assert.Equal(t, "transition not allowed for channel: CustomerInitClose => ConfirmedClose", err.Error())
custState, err = CustomerChangeChannelStatusToPendingClose(custState)
if err != nil {
t.Error("Failed to change close status to pending -", err)
}
// Customer claim tx from cust-close-from-escrow-tx
fmt.Println("========================================")
outputPk := changePk
inputAmount0 := custState.CustBalance - feeCC - feeMC
cpfpAmount := int64(valCpfp)
cpfpIndex := uint32(3)
claimAmount := inputAmount0 + cpfpAmount - txFee
SignedCustClaimTx, err := CustomerSignClaimTx(channelState, CloseEscrowTxId_LE, uint32(0), inputAmount0, claimAmount, toSelfDelay, outputPk, custState.RevLock, custClosePk, cpfpIndex, cpfpAmount, custState)
assert.Nil(t, err)
fmt.Println("TX5-cust-claim-tx: ", SignedCustClaimTx)
WriteToFile(CustClaimFromCustCloseEscrowTxFile, SignedCustClaimTx)
// Merchant claim tx to_merchant output from cust-close-from-escrow-tx (spendable immediately)
MerchantGenerateCustClaimTx(CloseEscrowTxId_LE, custState.MerchBalance, merchState, MerchClaimFromEscrowTxFile)
// Customer can also close from merch-close-tx
CloseMerchTx, CloseMerchTxId_LE, custState, err = ForceCustomerCloseTx(channelState, channelToken, false, custState)
assert.Nil(t, err)
assert.NotNil(t, CloseMerchTxId_LE)
WriteToFile(CustCloseFromMerchTxFile, CloseMerchTx)
fmt.Println("TX6: Close MerchTx ID (LE): ", CloseMerchTxId_LE)
fmt.Println("TX6: Close from MerchCloseTx => ", string(CloseMerchTx))
{
// try to claim from cust-close-from-merch-tx
// cpfpAmount := int64(valCpfp)
// cpfpIndex := uint32(3)
inputAmount1 := custState.CustBalance - feeCC - feeMC
claimAmount := inputAmount1 - txFee
SignedCustClaimTx2, err := CustomerSignClaimTx(channelState, CloseMerchTxId_LE, uint32(0), inputAmount1, claimAmount, toSelfDelay, outputPk, custState.RevLock, custClosePk, uint32(0), int64(0), custState)
assert.Nil(t, err)
WriteToFile(CustClaimFromCustCloseMerchTxFile, SignedCustClaimTx2)
// Merchant claim tx to_merchant output from cust-close-from-merch-tx (spendable immediately)
inputAmount2 := custState.MerchBalance - feeMC - valCpfp
MerchantGenerateCustClaimTx(CloseMerchTxId_LE, inputAmount2, merchState, MerchClaimFromMerchTxFile)
}
// Merchant checks whether it has seen RevLock from cust-close-tx on chain
isOldRevLock, FoundRevSecret, err := MerchantCheckRevLock(revState.RevLock, merchState)
assert.Nil(t, err)
fmt.Println("Looking for rev lock: ", revState.RevLock)
if isOldRevLock {
fmt.Println("Found rev secret: ", FoundRevSecret)
} else {
fmt.Println("Could not find rev secret!")
}
// Dispute scenario - If the customer has broadcast CloseEscrowTx and the revLock is an old revLock
index := uint32(0)
// amount := custBal // - 10
disputedInAmt := initCustBalPayout
fmt.Println("Disputing this amount: ", disputedInAmt)
// ideally generate new changePk
outputPk = changePk
fmt.Println("========================================")
fmt.Println("custClosePk :=> ", custClosePk)
fmt.Println("merchDisputePk :=> ", merchDispPk)
claimAmount = disputedInAmt - feeCC - feeMC
claimOutAmount := claimAmount - txFee
{
(*merchState.ChannelStatusMap)[escrowTxid_BE] = "CustomerInitClose"
disputeTx, merchState, err := MerchantSignDisputeTx(escrowTxid_LE, CloseEscrowTxId_TX3, index, claimAmount, claimOutAmount, toSelfDelay, outputPk, revState.RevLock, FoundRevSecret, custClosePk, merchState)
assert.Nil(t, err)
fmt.Println("========================================")
fmt.Println("TX5: disputeCloseEscrowTx: ", disputeTx)
fmt.Println("========================================")
WriteToFile(MerchDisputeFirstCustCloseTxFile, disputeTx)
SignedMerchDisputeTx2, _, err := MerchantSignDisputeTx(escrowTxid_LE, CloseMerchTxId_TX3, index, claimAmount, claimOutAmount, toSelfDelay, outputPk, revState.RevLock, FoundRevSecret, custClosePk, merchState)
assert.Nil(t, err)
WriteToFile(MerchDisputeFirstCustCloseFromMerchTxFile, SignedMerchDisputeTx2)
}
{
// cpfp output of final cust close from escrow tx
index1 := uint32(3)
inputAmount := valCpfp
// bump fee - claim the cpfp output + combine with another utxo to confirm parent transaction on chain
txid2_LE := escrowTxid_LE // use the change output from escrowTx
index2 := uint32(1)
inputAmount2 := int64(inputSats - txFee - outputSats)
sk2 := changeSk
txFee := int64(500000)
finalOutputPk := "034db01f7308e30c4ed380713bc09a70d27f19dbdc40229b36fcfae65e7f186baa"
SignedChildTx2, txid2, err := CreateChildTxToBumpFeeViaP2WPKH(CloseEscrowTxId_LE, index1, inputAmount, custCloseSk, txid2_LE, index2, inputAmount2, sk2, txFee, finalOutputPk)
assert.Nil(t, err)
fmt.Println("Signed child tx 2: ", SignedChildTx2)
fmt.Println("Signed child tx 2 txid: ", txid2)
WriteToFile(SignBumpFeeChildTxFile, SignedChildTx2)
}
// Merchant can claim tx output from merch-close-tx after timeout
fmt.Println("Claim tx from merchant close tx")
claimAmount = custBal + merchBal
claimAmount = claimAmount - feeCC - feeMC
claimOutAmount = claimAmount - txFee
SignedMerchClaimTx, err := MerchantSignMerchClaimTx(merchTxid_LE, index, claimAmount, claimOutAmount, toSelfDelay, custPk, outputPk, 0, 0, merchState)
assert.Nil(t, err)
fmt.Println("TX2-merch-close-claim-tx: ", SignedMerchClaimTx)
fmt.Println("========================================")
WriteToFile(MerchClaimFromMerchCloseTxFile, SignedMerchClaimTx)
custState, err = CustomerChangeChannelStatusToPendingClose(custState)
if err != nil {
t.Error("Failed to change close status to pending close -", err)
}
custState, err = CustomerChangeChannelStatusToConfirmedClose(custState)
if err != nil {
t.Error("Failed to change close status to confirmed -", err)
}
custState, err = CutstomerClearChannelStatus(custState)
if err != nil {
t.Error("Failed to clear close status for customer -", err)
}
_, err = MerchantChangeChannelStatusToConfirmedClose(escrowTxid_LE, merchState)
if err != nil {
t.Error("Failed to change close status to confirmed -", err)
}
merchState, err = MerchantClearChannelStatus(escrowTxid_LE, merchState)
if err != nil {
t.Error("Failed to clear close status for merchant -", err)
}
// test mutual close tx flow here
escrowedAmount := outputSats
custAmount := custState.CustBalance - feeCC
merchAmount := custState.MerchBalance
mCustSig, err := CustomerSignMutualCloseTx(escrowTxid_LE, index, escrowedAmount, custAmount, merchAmount, merchClosePk, custClosePk, merchPk, custPk, custSk)
assert.Nil(t, err)
fmt.Println("Cust sig for mutual tx: ", mCustSig)
SignedMutualCloseTx, mTxid, err := MerchantSignMutualCloseTx(escrowTxid_LE, index, escrowedAmount, custAmount, merchAmount, merchClosePk, custClosePk, merchPk, custPk, mCustSig, merchSk)
assert.Nil(t, err)
fmt.Println("Signed tx: ", SignedMutualCloseTx)
fmt.Println("txId: ", mTxid)
WriteToFile(MutualCloseTxFile, SignedMutualCloseTx)
fmt.Println("Successful test!")
return
}
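// runPayCust serializes the customer-side inputs into environment variables and runs
// TestPayUpdateCustomer in a child "go test" process, passing its results back through the
// custStateRet and successString environment variables.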
func runPayCust(channelState ChannelState, channelToken ChannelToken, state State, newState State, payTokenMaskCom string, revLockCom string, custState CustState) {
serChannelState, _ := json.Marshal(channelState)
os.Setenv("channelState", string(serChannelState))
serChannelToken, _ := json.Marshal(channelToken)
os.Setenv("channelToken", string(serChannelToken))
serState, _ := json.Marshal(state)
os.Setenv("state", string(serState))
serNewState, _ := json.Marshal(newState)
os.Setenv("newState", string(serNewState))
os.Setenv("payTokenMaskCom", payTokenMaskCom)
os.Setenv("revLockCom", revLockCom)
serCustState, _ := json.Marshal(custState)
os.Setenv("custState", string(serCustState))
os.Setenv("runTest", "true")
c := exec.Command("go", "test", "-v", "libzkchannels.go", "libzkchannels_test.go", "-run", "TestPayUpdateCustomer")
c.Env = os.Environ()
out, _ := c.Output()
// fmt.Println("output: ", string(out))
os.Setenv("custStateRet", strings.Split(string(out), "|||")[1])
os.Setenv("successString", strings.Split(string(out), "*-*")[1])
os.Setenv("runTest", "")
}
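// TestPayUpdateCustomer runs the customer side of the MPC payment; it is skipped unless invoked
// from runPayCust with the runTest environment variable set.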
func TestPayUpdateCustomer(t *testing.T) {
if os.Getenv("runTest") == "" {
t.Skip("Skip test when not called from other test")
}
channelState := ChannelState{}
err := json.Unmarshal([]byte(os.Getenv("channelState")), &channelState)
assert.Nil(t, err)
channelToken := ChannelToken{}
err = json.Unmarshal([]byte(os.Getenv("channelToken")), &channelToken)
assert.Nil(t, err)
state := State{}
err = json.Unmarshal([]byte(os.Getenv("state")), &state)
assert.Nil(t, err)
newState := State{}
err = json.Unmarshal([]byte(os.Getenv("newState")), &newState)
assert.Nil(t, err)
payTokenMaskCom := os.Getenv("payTokenMaskCom")
revLockCom := os.Getenv("revLockCom")
custState := CustState{}
err = json.Unmarshal([]byte(os.Getenv("custState")), &custState)
assert.Nil(t, err)
success, custState, err := PayUpdateCustomer(channelState, channelToken, state, newState, payTokenMaskCom, revLockCom, 10, custState, nil, nil, nil)
assert.Nil(t, err)
serCustState, err := json.Marshal(custState)
t.Log("\n|||", string(serCustState), "|||\n")
t.Log("\n*-*", success, "*-*\n")
assert.Nil(t, err)
}
environment: ["\"TIMELOCK\"", "\"FIX_CUSTOMER_WALLET\"", "\"UTXO_TXID\"", "\"UTXO_INDEX\"", "\"UTXO_SK\"", "\"UTXO_SAVE_TX\"", "\"successString\"", "\"custStateRet\"", "\"runTest\"", "\"channelState\"", "\"channelToken\"", "\"state\"", "\"newState\"", "\"payTokenMaskCom\"", "\"revLockCom\"", "\"custState\""]
variablearg: []
constarg: ["successString", "UTXO_TXID", "UTXO_SK", "UTXO_SAVE_TX", "FIX_CUSTOMER_WALLET", "state", "runTest", "channelState", "payTokenMaskCom", "revLockCom", "custStateRet", "TIMELOCK", "custState", "newState", "UTXO_INDEX", "channelToken"]
variableargjson: []
constargjson: ["successString", "UTXO_TXID", "UTXO_SK", "UTXO_SAVE_TX", "FIX_CUSTOMER_WALLET", "state", "runTest", "channelState", "payTokenMaskCom", "revLockCom", "custStateRet", "TIMELOCK", "custState", "newState", "UTXO_INDEX", "channelToken"]
lang: go
constargcount: 16
variableargcount: 0
sentence:

filename: common/testutils/initpq.go
content:
package testutils
import (
"database/sql"
"fmt"
"os"
"strings"
// postgres driver
_ "github.com/lib/pq"
)
// ConnectPQ connects to a postgres database for testing purposes
func ConnectPQ() (*sql.DB, error) {
host := os.Getenv("YAGPDB_TEST_PQ_HOST")
if host == "" {
host = "localhost"
}
user := os.Getenv("YAGPDB_TEST_PQ_USER")
if user == "" {
user = "yagpdb_test"
}
dbPassword := os.Getenv("YAGPDB_TEST_PQ_PASSWORD")
sslMode := os.Getenv("YAGPDB_TEST_PQ_SSLMODE")
if sslMode == "" {
sslMode = "disable"
}
dbName := os.Getenv("YAGPDB_TEST_PQ_DB")
if dbName == "" {
dbName = "yagpdb_test"
}
if !strings.Contains(dbName, "test") {
panic("Test database name has to contain 'test'. This is a safety measure to protect against running tests on production systems.")
}
connStr := fmt.Sprintf("host=%s user=%s dbname=%s sslmode=%s password='%s'", host, user, dbName, sslMode, dbPassword)
connStrPWCensored := fmt.Sprintf("host=%s user=%s dbname=%s sslmode=%s password='%s'", host, user, dbName, sslMode, "***")
fmt.Println("Postgres connection string being used: " + connStrPWCensored)
conn, err := sql.Open("postgres", connStr)
return conn, err
}
// InitTables will drop the provided tables and initialize the new ones
func InitTables(db *sql.DB, dropTables []string, initQueries []string) error {
for _, v := range dropTables {
_, err := db.Exec("DROP TABLE IF EXISTS " + v)
if err != nil {
return err
}
}
for _, v := range initQueries {
_, err := db.Exec(v)
if err != nil {
return err
}
}
return nil
}
// InitPQ is a helper that calls both ConnectPQ and InitTables
func InitPQ(dropTables []string, initQueries []string) (*sql.DB, error) {
db, err := ConnectPQ()
if err != nil {
return nil, err
}
err = InitTables(db, dropTables, initQueries)
return db, err
}
// ClearTables deletes all rows from the given tables, and panics if an error occurs;
// useful in defers for test cleanup
func ClearTables(db *sql.DB, tables ...string) {
for _, v := range tables {
_, err := db.Exec("DELETE FROM " + v + ";")
if err != nil {
panic(err)
}
}
}
environment: ["\"YAGPDB_TEST_PQ_HOST\"", "\"YAGPDB_TEST_PQ_USER\"", "\"YAGPDB_TEST_PQ_PASSWORD\"", "\"YAGPDB_TEST_PQ_SSLMODE\"", "\"YAGPDB_TEST_PQ_DB\""]
variablearg: []
constarg: ["YAGPDB_TEST_PQ_HOST", "YAGPDB_TEST_PQ_USER", "YAGPDB_TEST_PQ_DB", "YAGPDB_TEST_PQ_SSLMODE", "YAGPDB_TEST_PQ_PASSWORD"]
variableargjson: []
constargjson: ["YAGPDB_TEST_PQ_HOST", "YAGPDB_TEST_PQ_USER", "YAGPDB_TEST_PQ_DB", "YAGPDB_TEST_PQ_SSLMODE", "YAGPDB_TEST_PQ_PASSWORD"]
lang: go
constargcount: 5
variableargcount: 0
sentence:

filename: zapbox/encoder.go
content:
// Copyright 2019 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zapbox
import (
"encoding/base64"
"encoding/json"
"math"
"os"
"path"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/logrusorgru/aurora"
. "github.com/logrusorgru/aurora"
"go.uber.org/zap"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"golang.org/x/crypto/ssh/terminal"
)
const (
ansiColorEscape = "\033["
clearANSIModifier = ansiColorEscape + "0m"
grayFg = (Color(232 + 12)) << 16
)
var bufferpool = buffer.NewPool()
var levelToColor map[zapcore.Level]Color
var _loggerPool = sync.Pool{New: func() interface{} {
return &Encoder{}
}}
func init() {
levelToColor = make(map[zapcore.Level]Color)
levelToColor[zap.DebugLevel] = MagentaFg
levelToColor[zap.InfoLevel] = GreenFg
levelToColor[zap.WarnLevel] = BrownFg
levelToColor[zap.ErrorLevel] = RedFg
levelToColor[zap.DPanicLevel] = RedFg
levelToColor[zap.PanicLevel] = RedFg
levelToColor[zap.FatalLevel] = RedFg
}
type Encoder struct {
*jsonEncoder
showLevel bool
showLoggerName bool
showCallerName bool
showFullCaller bool
showStacktrace bool
showTime bool
enableAnsiColor bool
}
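// NewEncoder returns a console encoder whose verbosity is driven by the DEBUG, TRACE and INFO
// environment variables and the given verbosity level; ANSI colors are enabled only when stdout
// is a terminal.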
func NewEncoder(verbosity int) zapcore.Encoder {
isDebug := os.Getenv("DEBUG") != "" || os.Getenv("TRACE") != ""
isInfo := os.Getenv("INFO") != ""
isTTY := terminal.IsTerminal(int(os.Stdout.Fd()))
return &Encoder{
jsonEncoder: newJSONEncoder(zapcore.EncoderConfig{
EncodeDuration: zapcore.StringDurationEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
}, true),
showLevel: isInfo || isDebug || verbosity >= 1,
showLoggerName: isInfo || isDebug || verbosity >= 1,
showTime: isInfo || isDebug || verbosity >= 1,
showCallerName: isInfo || isDebug || verbosity >= 1,
showFullCaller: verbosity >= 4,
// Also always forcibly displayed at the "Error" level and above
showStacktrace: isInfo || isDebug || verbosity >= 2,
enableAnsiColor: isTTY,
}
}
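// NewLightWeightEncoder returns a console encoder that prints only the message and fields, with
// ANSI colors always enabled.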
func NewLightWeightEncoder() zapcore.Encoder {
return &Encoder{
jsonEncoder: newJSONEncoder(zapcore.EncoderConfig{
EncodeDuration: zapcore.StringDurationEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
}, true),
showLevel: false,
showLoggerName: false,
showTime: false,
showCallerName: false,
showFullCaller: false,
// Also always forcibly displayed at the "Error" level and above
showStacktrace: false,
enableAnsiColor: true,
}
}
func (c Encoder) Clone() zapcore.Encoder {
return &Encoder{
jsonEncoder: c.jsonEncoder.Clone().(*jsonEncoder),
showLevel: c.showLevel,
showLoggerName: c.showLoggerName,
showStacktrace: c.showStacktrace,
showCallerName: c.showCallerName,
showFullCaller: c.showFullCaller,
showTime: c.showTime,
enableAnsiColor: c.enableAnsiColor,
}
}
func (c Encoder) colorString(color, s string) (out string) {
if c.enableAnsiColor {
out += ansiColorEscape + color + "m"
}
out += s
if c.enableAnsiColor {
out += clearANSIModifier
}
return
}
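// EncodeEntry renders a log entry as a single colored console line: optional timestamp and logger
// name, the message, the caller, the JSON-encoded fields and, when enabled, the stack trace.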
func (c Encoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
line := bufferpool.Get()
lineColor := levelColor(ent.Level)
if c.showTime {
line.AppendString(c.colorString(grayFg.Nos(true), ent.Time.Format("2006-01-02T15:04:05.000Z0700")+" "))
}
showLoggerName := c.showLoggerName && ent.LoggerName != ""
if showLoggerName {
loggerName := ent.LoggerName
if loggerName == "common" && ent.Caller.Defined {
base := path.Base(ent.Caller.FullPath())
packagePath := strings.Split(base, ".")[0]
if packagePath != "" {
loggerName = packagePath
}
}
line.AppendString(c.colorString(BlueFg.Nos(true), "("+loggerName+") "))
}
message := ent.Message
if strings.HasSuffix(message, ".") && !strings.HasSuffix(message, "...") {
message = strings.TrimSuffix(message, ".")
}
line.AppendString(c.colorString(lineColor.Nos(true), message))
showCaller := (c.showCallerName || zap.WarnLevel.Enabled(ent.Level)) && ent.Caller.Defined
if showCaller && ent.LoggerName != "box" {
callerPath := ent.Caller.TrimmedPath()
if !c.showFullCaller {
callerPath = maybeRemovePackageVersion(callerPath)
}
line.AppendString(c.colorString(BlueFg.Nos(true), " ("+callerPath+")"))
}
// Add any structured context even if len(fields) == 0 because there could be implicit (With()) fields
if c.enableAnsiColor {
line.AppendString(ansiColorEscape + grayFg.Nos(true) + "m ")
}
c.writeJSONFields(line, fields)
if c.enableAnsiColor {
line.AppendString(clearANSIModifier)
}
if ent.Stack != "" && (c.showStacktrace || zap.ErrorLevel.Enabled(ent.Level)) {
line.AppendString("\n" + c.colorString(lineColor.Nos(true), ent.Stack))
}
line.AppendString("\n")
return line, nil
}
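// maybeRemovePackageVersion strips a Go module version suffix from a caller path, so that
// "pkg@v1.2.3/file.go" becomes "pkg/file.go".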
func maybeRemovePackageVersion(input string) string {
atIndex := strings.Index(input, "@")
if atIndex == -1 {
return input
}
cutUpToIndex := strings.LastIndex(input, "/")
if cutUpToIndex == -1 {
return input
}
return input[0:atIndex] + input[cutUpToIndex:]
}
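// writeJSONFields encodes the structured fields as JSON and appends them to line wrapped in
// braces; nothing is written when there are no fields.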
func (c Encoder) writeJSONFields(line *buffer.Buffer, extra []zapcore.Field) {
context := c.Clone().(*Encoder)
defer context.buf.Free()
addFields(context, extra)
context.closeOpenNamespaces()
if context.buf.Len() == 0 {
return
}
line.AppendByte('{')
line.Write(context.buf.Bytes())
line.AppendByte('}')
}
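// levelColor maps a log level to its display color, defaulting to blue.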
func levelColor(level zapcore.Level) aurora.Color {
color := levelToColor[level]
if color == 0 {
color = BlueFg
}
return color
}
func addFields(enc zapcore.ObjectEncoder, fields []zapcore.Field) {
for i := range fields {
fields[i].AddTo(enc)
}
}
// Copied from `github.com/uber-go/zap/zapcore/json_encoder.go`
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
var _jsonPool = sync.Pool{New: func() interface{} {
return &jsonEncoder{}
}}
func getJSONEncoder() *jsonEncoder {
return _jsonPool.Get().(*jsonEncoder)
}
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
enc.reflectBuf.Free()
}
enc.EncoderConfig = nil
enc.buf = nil
enc.spaced = false
enc.openNamespaces = 0
enc.reflectBuf = nil
enc.reflectEnc = nil
_jsonPool.Put(enc)
}
type jsonEncoder struct {
*zapcore.EncoderConfig
buf *buffer.Buffer
spaced bool // include spaces after colons and commas
openNamespaces int
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
reflectEnc *json.Encoder
}
func newJSONEncoder(cfg zapcore.EncoderConfig, spaced bool) *jsonEncoder {
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
spaced: spaced,
}
}
func (enc *jsonEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error {
enc.addKey(key)
return enc.AppendArray(arr)
}
func (enc *jsonEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error {
enc.addKey(key)
return enc.AppendObject(obj)
}
func (enc *jsonEncoder) AddBinary(key string, val []byte) {
enc.AddString(key, base64.StdEncoding.EncodeToString(val))
}
func (enc *jsonEncoder) AddByteString(key string, val []byte) {
enc.addKey(key)
enc.AppendByteString(val)
}
func (enc *jsonEncoder) AddBool(key string, val bool) {
enc.addKey(key)
enc.AppendBool(val)
}
func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.addKey(key)
enc.AppendComplex128(val)
}
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
}
func (enc *jsonEncoder) AddFloat64(key string, val float64) {
enc.addKey(key)
enc.AppendFloat64(val)
}
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
}
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
// For consistency with our custom JSON encoder.
enc.reflectEnc.SetEscapeHTML(false)
} else {
enc.reflectBuf.Reset()
}
}
var nullLiteralBytes = []byte("null")
// Only invoke the standard JSON encoder if there is actually something to
// encode; otherwise write JSON null literal directly.
func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
if obj == nil {
return nullLiteralBytes, nil
}
enc.resetReflectBuf()
if err := enc.reflectEnc.Encode(obj); err != nil {
return nil, err
}
enc.reflectBuf.TrimNewline()
return enc.reflectBuf.Bytes(), nil
}
func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
valueBytes, err := enc.encodeReflected(obj)
if err != nil {
return err
}
enc.addKey(key)
_, err = enc.buf.Write(valueBytes)
return err
}
func (enc *jsonEncoder) OpenNamespace(key string) {
enc.addKey(key)
enc.buf.AppendByte('{')
enc.openNamespaces++
}
func (enc *jsonEncoder) AddString(key, val string) {
enc.addKey(key)
enc.AppendString(val)
}
func (enc *jsonEncoder) AddTime(key string, val time.Time) {
enc.addKey(key)
enc.AppendTime(val)
}
func (enc *jsonEncoder) AddUint64(key string, val uint64) {
enc.addKey(key)
enc.AppendUint64(val)
}
func (enc *jsonEncoder) AppendArray(arr zapcore.ArrayMarshaler) error {
enc.addElementSeparator()
enc.buf.AppendByte('[')
err := arr.MarshalLogArray(enc)
enc.buf.AppendByte(']')
return err
}
func (enc *jsonEncoder) AppendObject(obj zapcore.ObjectMarshaler) error {
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
return err
}
func (enc *jsonEncoder) AppendBool(val bool) {
enc.addElementSeparator()
enc.buf.AppendBool(val)
}
func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.addElementSeparator()
enc.buf.AppendByte('"')
enc.safeAddByteString(val)
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendComplex128(val complex128) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
enc.buf.AppendFloat(r, 64)
enc.buf.AppendByte('+')
enc.buf.AppendFloat(i, 64)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendDuration(val time.Duration) {
cur := enc.buf.Len()
enc.EncodeDuration(val, enc)
if cur == enc.buf.Len() {
// User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
// JSON valid.
enc.AppendInt64(int64(val))
}
}
func (enc *jsonEncoder) AppendInt64(val int64) {
enc.addElementSeparator()
enc.buf.AppendInt(val)
}
func (enc *jsonEncoder) AppendReflected(val interface{}) error {
valueBytes, err := enc.encodeReflected(val)
if err != nil {
return err
}
enc.addElementSeparator()
_, err = enc.buf.Write(valueBytes)
return err
}
func (enc *jsonEncoder) AppendString(val string) {
enc.addElementSeparator()
enc.buf.AppendByte('"')
enc.safeAddString(val)
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
enc.buf.AppendByte('"')
enc.buf.AppendTime(time, layout)
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendTime(val time.Time) {
cur := enc.buf.Len()
enc.EncodeTime(val, enc)
if cur == enc.buf.Len() {
// User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
// output JSON valid.
enc.AppendInt64(val.UnixNano())
}
}
func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.addElementSeparator()
enc.buf.AppendUint(val)
}
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() zapcore.Encoder {
clone := enc.clone()
clone.buf.Write(enc.buf.Bytes())
return clone
}
func (enc *jsonEncoder) clone() *jsonEncoder {
clone := getJSONEncoder()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
clone.buf = bufferpool.Get()
return clone
}
func (enc *jsonEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
final := enc.clone()
final.buf.AppendByte('{')
if final.LevelKey != "" {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
if cur == final.buf.Len() {
// User-supplied EncodeLevel was a no-op. Fall back to strings to keep
// output JSON valid.
final.AppendString(ent.Level.String())
}
}
if final.TimeKey != "" {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
final.addKey(final.NameKey)
cur := final.buf.Len()
nameEncoder := final.EncodeName
// if no name encoder provided, fall back to FullNameEncoder for backwards
// compatibility
if nameEncoder == nil {
nameEncoder = zapcore.FullNameEncoder
}
nameEncoder(ent.LoggerName, final)
if cur == final.buf.Len() {
// User-supplied EncodeName was a no-op. Fall back to strings to
// keep output JSON valid.
final.AppendString(ent.LoggerName)
}
}
if ent.Caller.Defined && final.CallerKey != "" {
final.addKey(final.CallerKey)
cur := final.buf.Len()
final.EncodeCaller(ent.Caller, final)
if cur == final.buf.Len() {
// User-supplied EncodeCaller was a no-op. Fall back to strings to
// keep output JSON valid.
final.AppendString(ent.Caller.String())
}
}
if final.MessageKey != "" {
final.addKey(enc.MessageKey)
final.AppendString(ent.Message)
}
if enc.buf.Len() > 0 {
final.addElementSeparator()
final.buf.Write(enc.buf.Bytes())
}
addFields(final, fields)
final.closeOpenNamespaces()
if ent.Stack != "" && final.StacktraceKey != "" {
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
if final.LineEnding != "" {
final.buf.AppendString(final.LineEnding)
} else {
final.buf.AppendString(zapcore.DefaultLineEnding)
}
ret := final.buf
putJSONEncoder(final)
return ret, nil
}
func (enc *jsonEncoder) truncate() {
enc.buf.Reset()
}
func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
}
func (enc *jsonEncoder) addKey(key string) {
enc.addElementSeparator()
enc.buf.AppendByte('"')
enc.safeAddString(key)
enc.buf.AppendByte('"')
enc.buf.AppendByte(':')
if enc.spaced {
enc.buf.AppendByte(' ')
}
}
func (enc *jsonEncoder) addElementSeparator() {
last := enc.buf.Len() - 1
if last < 0 {
return
}
switch enc.buf.Bytes()[last] {
case '{', '[', ':', ',', ' ':
return
default:
enc.buf.AppendByte(',')
if enc.spaced {
enc.buf.AppendByte(' ')
}
}
}
func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
enc.addElementSeparator()
switch {
case math.IsNaN(val):
enc.buf.AppendString(`"NaN"`)
case math.IsInf(val, 1):
enc.buf.AppendString(`"+Inf"`)
case math.IsInf(val, -1):
enc.buf.AppendString(`"-Inf"`)
default:
enc.buf.AppendFloat(val, bitSize)
}
}
// safeAddString JSON-escapes a string and appends it to the internal buffer.
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRuneInString(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.AppendString(s[i : i+size])
i += size
}
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
for i := 0; i < len(s); {
if enc.tryAddRuneSelf(s[i]) {
i++
continue
}
r, size := utf8.DecodeRune(s[i:])
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.Write(s[i : i+size])
i += size
}
}
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
if b >= utf8.RuneSelf {
return false
}
if 0x20 <= b && b != '\\' && b != '"' {
enc.buf.AppendByte(b)
return true
}
switch b {
case '\\', '"':
enc.buf.AppendByte('\\')
enc.buf.AppendByte(b)
case '\n':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('n')
case '\r':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('r')
case '\t':
enc.buf.AppendByte('\\')
enc.buf.AppendByte('t')
default:
// Encode bytes < 0x20, except for the escape sequences above.
enc.buf.AppendString(`\u00`)
enc.buf.AppendByte(_hex[b>>4])
enc.buf.AppendByte(_hex[b&0xF])
}
return true
}
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
if r == utf8.RuneError && size == 1 {
enc.buf.AppendString(`\ufffd`)
return true
}
return false
}
environment: ["\"DEBUG\"", "\"TRACE\"", "\"INFO\""]
variablearg: []
constarg: ["INFO", "TRACE", "DEBUG"]
variableargjson: []
constargjson: ["INFO", "TRACE", "DEBUG"]
lang: go
constargcount: 3
variableargcount: 0
sentence:

filename: tot/chrome.go
content:
/*
Package chrome aims to be a complete Chrome DevTools Protocol Viewer
implementation.
This version implements the Tip-of-Tree API. See
https://chromedevtools.github.io/devtools-protocol/tot/ for details.
*/
package chrome
import (
"os"
"github.com/bdlm/log"
)
/*
If a LOG_LEVEL environment variable exists, set that value as the log level.
Useful during development.
*/
func init() {
levelFlag := os.Getenv("LOG_LEVEL")
if "" == levelFlag {
levelFlag = "info"
}
level, err := log.ParseLevel(levelFlag)
if nil == err {
log.SetLevel(level)
}
}
/*
Version is a struct representing the Chromium version information.
*/
type Version struct {
Browser string `json:"browser"`
ProtocolVersion string `json:"protocol-version"`
UserAgent string `json:"user-agent"`
V8Version string `json:"v8-version"`
WebKitVersion string `json:"webkit-version"`
WebSocketDebuggerURL string `json:"webSocketDebuggerUrl"`
}
environment: ["\"LOG_LEVEL\""]
variablearg: []
constarg: ["LOG_LEVEL"]
variableargjson: []
constargjson: ["LOG_LEVEL"]
lang: go
constargcount: 1
variableargcount: 0
sentence:

filename: test/e2e/e2e_setup_test.go
content:
package e2e
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"testing"
"time"
gravitytypes "github.com/Gravity-Bridge/Gravity-Bridge/module/x/gravity/types"
"github.com/cosmos/cosmos-sdk/server"
srvconfig "github.com/cosmos/cosmos-sdk/server/config"
sdk "github.com/cosmos/cosmos-sdk/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
bech32ibctypes "github.com/osmosis-labs/bech32-ibc/x/bech32ibc/types"
"github.com/spf13/viper"
"github.com/stretchr/testify/suite"
tmconfig "github.com/tendermint/tendermint/config"
tmjson "github.com/tendermint/tendermint/libs/json"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
"github.com/umee-network/umee/v2/app"
leveragetypes "github.com/umee-network/umee/v2/x/leverage/types"
)
const (
photonDenom = "photon"
initBalanceStr = "110000000000uumee,100000000000photon"
minGasPrice = "0.00001"
gaiaChainID = "test-gaia-chain"
ethChainID uint = 15
ethMinerPK = "0xb1bab011e03a9862664706fc3bbaa1b16651528e5f0e7fbfcbfdd8be302a13e7"
)
var (
stakeAmount, _ = sdk.NewIntFromString("100000000000")
stakeAmountCoin = sdk.NewCoin(app.BondDenom, stakeAmount)
)
type IntegrationTestSuite struct {
suite.Suite
tmpDirs []string
chain *chain
ethClient *ethclient.Client
dkrPool *dockertest.Pool
dkrNet *dockertest.Network
ethResource *dockertest.Resource
valResources []*dockertest.Resource
orchResources []*dockertest.Resource
gravityContractAddr string
}
func TestIntegrationTestSuite(t *testing.T) {
suite.Run(t, new(IntegrationTestSuite))
}
func (s *IntegrationTestSuite) SetupSuite() {
s.T().Log("setting up e2e integration test suite...")
var err error
s.chain, err = newChain()
s.Require().NoError(err)
s.T().Logf("starting e2e infrastructure; chain-id: %s; datadir: %s", s.chain.id, s.chain.dataDir)
s.dkrPool, err = dockertest.NewPool("")
s.Require().NoError(err)
s.dkrNet, err = s.dkrPool.CreateNetwork(fmt.Sprintf("%s-testnet", s.chain.id))
s.Require().NoError(err)
var useGanache bool
if str := os.Getenv("PEGGO_E2E_USE_GANACHE"); len(str) > 0 {
useGanache, err = strconv.ParseBool(str)
s.Require().NoError(err)
}
// The bootstrapping phase is as follows:
//
// 1. Initialize Umee validator nodes.
// 2. Launch an Ethereum (or Ganache) container that mines.
// 3. Create and initialize Umee validator genesis files (setting delegate keys for validators).
// 4. Start Umee network.
// 5. Deploy the Gravity Bridge contract
// 6. Create and start peggo (orchestrator) containers.
s.initNodes()
if useGanache {
s.runGanacheContainer()
} else {
s.initEthereum()
s.runEthContainer()
}
s.initGenesis()
s.initValidatorConfigs()
s.runValidators()
s.runContractDeployment()
s.runOrchestrators()
}
func (s *IntegrationTestSuite) TearDownSuite() {
if str := os.Getenv("PEGGO_E2E_SKIP_CLEANUP"); len(str) > 0 {
skipCleanup, err := strconv.ParseBool(str)
s.Require().NoError(err)
if skipCleanup {
return
}
}
s.T().Log("tearing down e2e integration test suite...")
s.Require().NoError(s.dkrPool.Purge(s.ethResource))
for _, vc := range s.valResources {
s.Require().NoError(s.dkrPool.Purge(vc))
}
for _, oc := range s.orchResources {
s.Require().NoError(s.dkrPool.Purge(oc))
}
s.Require().NoError(s.dkrPool.RemoveNetwork(s.dkrNet))
os.RemoveAll(s.chain.dataDir)
for _, td := range s.tmpDirs {
os.RemoveAll(td)
}
}
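// initNodes creates the validator and orchestrator keys, seeds the first validator's genesis file
// with their accounts, and copies that genesis file to the remaining validators.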
func (s *IntegrationTestSuite) initNodes() {
s.Require().NoError(s.chain.createAndInitValidators(2))
s.Require().NoError(s.chain.createAndInitOrchestrators(2))
// initialize a genesis file for the first validator
val0ConfigDir := s.chain.validators[0].configDir()
for _, val := range s.chain.validators {
s.Require().NoError(
addGenesisAccount(val0ConfigDir, "", initBalanceStr, val.keyInfo.GetAddress()),
)
}
// add orchestrator accounts to genesis file
for _, orch := range s.chain.orchestrators {
s.Require().NoError(
addGenesisAccount(val0ConfigDir, "", initBalanceStr, orch.keyInfo.GetAddress()),
)
}
// copy the genesis file to the remaining validators
for _, val := range s.chain.validators[1:] {
_, err := copyFile(
filepath.Join(val0ConfigDir, "config", "genesis.json"),
filepath.Join(val.configDir(), "config", "genesis.json"),
)
s.Require().NoError(err)
}
}
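// initEthereum builds an Ethereum genesis that funds the orchestrators' keys and writes it to the
// chain config directory.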
func (s *IntegrationTestSuite) initEthereum() {
// generate ethereum keys for the orchestrators and add them to the ethereum genesis
ethGenesis := EthereumGenesis{
Difficulty: "0x400",
GasLimit: "0xB71B00",
Config: EthereumConfig{ChainID: ethChainID},
Alloc: make(map[string]Allocation, len(s.chain.validators)+1),
}
alloc := Allocation{
Balance: "0x1337000000000000000000",
}
ethGenesis.Alloc["0xBf660843528035a5A4921534E156a27e64B231fE"] = alloc
for _, orch := range s.chain.orchestrators {
s.Require().NoError(orch.generateEthereumKey())
ethGenesis.Alloc[orch.ethereumKey.address] = alloc
}
ethGenBz, err := json.MarshalIndent(ethGenesis, "", " ")
s.Require().NoError(err)
// write out the genesis file
s.Require().NoError(writeFile(filepath.Join(s.chain.configDir(), "eth_genesis.json"), ethGenBz))
}
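// initGenesis customizes the gravity, leverage, bech32ibc, bank and genutil genesis state,
// collects the validators' gentxs, and writes the resulting genesis file to every validator.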
func (s *IntegrationTestSuite) initGenesis() {
serverCtx := server.NewDefaultContext()
config := serverCtx.Config
config.SetRoot(s.chain.validators[0].configDir())
config.Moniker = s.chain.validators[0].moniker
genFilePath := config.GenesisFile()
appGenState, genDoc, err := genutiltypes.GenesisStateFromGenFile(genFilePath)
s.Require().NoError(err)
var gravityGenState gravitytypes.GenesisState
s.Require().NoError(cdc.UnmarshalJSON(appGenState[gravitytypes.ModuleName], &gravityGenState))
gravityGenState.Params.BridgeChainId = uint64(ethChainID)
bz, err := cdc.MarshalJSON(&gravityGenState)
s.Require().NoError(err)
appGenState[gravitytypes.ModuleName] = bz
var leverageGenState leveragetypes.GenesisState
s.Require().NoError(cdc.UnmarshalJSON(appGenState[leveragetypes.ModuleName], &leverageGenState))
leverageGenState.Registry = append(leverageGenState.Registry, leveragetypes.Token{
BaseDenom: app.BondDenom,
SymbolDenom: app.DisplayDenom,
Exponent: 6,
ReserveFactor: sdk.MustNewDecFromStr("0.100000000000000000"),
CollateralWeight: sdk.MustNewDecFromStr("0.050000000000000000"),
LiquidationThreshold: sdk.MustNewDecFromStr("0.050000000000000000"),
BaseBorrowRate: sdk.MustNewDecFromStr("0.020000000000000000"),
KinkBorrowRate: sdk.MustNewDecFromStr("0.200000000000000000"),
MaxBorrowRate: sdk.MustNewDecFromStr("1.50000000000000000"),
KinkUtilizationRate: sdk.MustNewDecFromStr("0.200000000000000000"),
LiquidationIncentive: sdk.MustNewDecFromStr("0.180000000000000000"),
})
bz, err = cdc.MarshalJSON(&leverageGenState)
s.Require().NoError(err)
appGenState[leveragetypes.ModuleName] = bz
var bech32GenState bech32ibctypes.GenesisState
s.Require().NoError(cdc.UnmarshalJSON(appGenState[bech32ibctypes.ModuleName], &bech32GenState))
bech32GenState.NativeHRP = sdk.GetConfig().GetBech32AccountAddrPrefix()
bz, err = cdc.MarshalJSON(&bech32GenState)
s.Require().NoError(err)
appGenState[bech32ibctypes.ModuleName] = bz
var bankGenState banktypes.GenesisState
s.Require().NoError(cdc.UnmarshalJSON(appGenState[banktypes.ModuleName], &bankGenState))
bankGenState.DenomMetadata = append(bankGenState.DenomMetadata, banktypes.Metadata{
Description: "An example stable token",
Display: photonDenom,
Base: photonDenom,
Symbol: photonDenom,
Name: photonDenom,
DenomUnits: []*banktypes.DenomUnit{
{
Denom: photonDenom,
Exponent: 0,
},
},
})
bz, err = cdc.MarshalJSON(&bankGenState)
s.Require().NoError(err)
appGenState[banktypes.ModuleName] = bz
var genUtilGenState genutiltypes.GenesisState
s.Require().NoError(cdc.UnmarshalJSON(appGenState[genutiltypes.ModuleName], &genUtilGenState))
// generate genesis txs
genTxs := make([]json.RawMessage, len(s.chain.validators))
for i, val := range s.chain.validators {
createValmsg, err := val.buildCreateValidatorMsg(stakeAmountCoin)
s.Require().NoError(err)
delKeysMsg, err := val.buildDelegateKeysMsg(s.chain.orchestrators[i].keyInfo.GetAddress(), s.chain.orchestrators[i].ethereumKey.address)
s.Require().NoError(err)
signedTx, err := val.signMsg(createValmsg, delKeysMsg)
s.Require().NoError(err)
txRaw, err := cdc.MarshalJSON(signedTx)
s.Require().NoError(err)
genTxs[i] = txRaw
}
genUtilGenState.GenTxs = genTxs
bz, err = cdc.MarshalJSON(&genUtilGenState)
s.Require().NoError(err)
appGenState[genutiltypes.ModuleName] = bz
bz, err = json.MarshalIndent(appGenState, "", " ")
s.Require().NoError(err)
genDoc.AppState = bz
bz, err = tmjson.MarshalIndent(genDoc, "", " ")
s.Require().NoError(err)
// write the updated genesis file to each validator
for _, val := range s.chain.validators {
writeFile(filepath.Join(val.configDir(), "config", "genesis.json"), bz)
}
}
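// initValidatorConfigs writes the Tendermint and application configs for each validator, wiring
// up persistent peers and minimum gas prices.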
func (s *IntegrationTestSuite) initValidatorConfigs() {
for i, val := range s.chain.validators {
tmCfgPath := filepath.Join(val.configDir(), "config", "config.toml")
vpr := viper.New()
vpr.SetConfigFile(tmCfgPath)
s.Require().NoError(vpr.ReadInConfig())
valConfig := &tmconfig.Config{}
s.Require().NoError(vpr.Unmarshal(valConfig))
valConfig.P2P.ListenAddress = "tcp://0.0.0.0:26656"
valConfig.P2P.AddrBookStrict = false
valConfig.P2P.ExternalAddress = fmt.Sprintf("%s:%d", val.instanceName(), 26656)
valConfig.RPC.ListenAddress = "tcp://0.0.0.0:26657"
valConfig.StateSync.Enable = false
valConfig.LogLevel = "info"
var peers []string
for j := 0; j < len(s.chain.validators); j++ {
if i == j {
continue
}
peer := s.chain.validators[j]
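// persistent peer addresses assume each validator container is reachable as <moniker><index> on the Docker network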
peerID := fmt.Sprintf("%s@%s%d:26656", peer.nodeKey.ID(), peer.moniker, j)
peers = append(peers, peerID)
}
valConfig.P2P.PersistentPeers = strings.Join(peers, ",")
tmconfig.WriteConfigFile(tmCfgPath, valConfig)
// set application configuration
appCfgPath := filepath.Join(val.configDir(), "config", "app.toml")
appConfig := srvconfig.DefaultConfig()
appConfig.API.Enable = true
appConfig.MinGasPrices = fmt.Sprintf("%s%s", minGasPrice, photonDenom)
srvconfig.WriteConfigFile(appCfgPath, appConfig)
}
}
func (s *IntegrationTestSuite) runGanacheContainer() {
s.T().Log("starting Ganache container...")
tmpDir, err := ioutil.TempDir("", "umee-e2e-testnet-eth-")
s.Require().NoError(err)
s.tmpDirs = append(s.tmpDirs, tmpDir)
_, err = copyFile(
filepath.Join("./docker/", "ganache.Dockerfile"),
filepath.Join(tmpDir, "ganache.Dockerfile"),
)
s.Require().NoError(err)
entrypoint := []string{
"ganache-cli",
"-h",
"0.0.0.0",
"--networkId",
"15",
}
entrypoint = append(entrypoint, "--account", "0xb1bab011e03a9862664706fc3bbaa1b16651528e5f0e7fbfcbfdd8be302a13e7,0x3635C9ADC5DEA00000")
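// pre-fund one Ethereum account per orchestrator (0x3635C9ADC5DEA00000 wei = 1000 ETH)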
for _, orch := range s.chain.orchestrators {
s.Require().NoError(orch.generateEthereumKey())
entrypoint = append(entrypoint, "--account", orch.ethereumKey.privateKey+",0x3635C9ADC5DEA00000")
}
s.ethResource, err = s.dkrPool.BuildAndRunWithBuildOptions(
&dockertest.BuildOptions{
Dockerfile: "ganache.Dockerfile",
ContextDir: tmpDir,
},
&dockertest.RunOptions{
Name: "ganache",
NetworkID: s.dkrNet.Network.ID,
ExposedPorts: []string{"8545"},
PortBindings: map[docker.Port][]docker.PortBinding{
"8545/tcp": {{HostIP: "", HostPort: "8545"}},
},
Env: []string{},
Entrypoint: entrypoint,
},
noRestart,
)
s.Require().NoError(err)
s.ethClient, err = ethclient.Dial(fmt.Sprintf("http://%s", s.ethResource.GetHostPort("8545/tcp")))
s.Require().NoError(err)
match := "Listening on 0.0.0.0:8545"
var (
outBuf bytes.Buffer
errBuf bytes.Buffer
)
// Wait for Ganache to start running.
s.Require().Eventually(
func() bool {
err := s.dkrPool.Client.Logs(
docker.LogsOptions{
Container: s.ethResource.Container.ID,
OutputStream: &outBuf,
ErrorStream: &errBuf,
Stdout: true,
Stderr: true,
},
)
if err != nil {
return false
}
return strings.Contains(outBuf.String(), match)
},
1*time.Minute,
5*time.Second,
"ganache node failed to start",
)
s.T().Logf("started Ganache container: %s", s.ethResource.Container.ID)
}
func (s *IntegrationTestSuite) runEthContainer() {
s.T().Log("starting Ethereum container...")
tmpDir, err := ioutil.TempDir("", "umee-e2e-testnet-eth-")
s.Require().NoError(err)
s.tmpDirs = append(s.tmpDirs, tmpDir)
_, err = copyFile(
filepath.Join(s.chain.configDir(), "eth_genesis.json"),
filepath.Join(tmpDir, "eth_genesis.json"),
)
s.Require().NoError(err)
_, err = copyFile(
filepath.Join("./docker/", "eth.Dockerfile"),
filepath.Join(tmpDir, "eth.Dockerfile"),
)
s.Require().NoError(err)
s.ethResource, err = s.dkrPool.BuildAndRunWithBuildOptions(
&dockertest.BuildOptions{
Dockerfile: "eth.Dockerfile",
ContextDir: tmpDir,
},
&dockertest.RunOptions{
Name: "ethereum",
NetworkID: s.dkrNet.Network.ID,
PortBindings: map[docker.Port][]docker.PortBinding{
"8545/tcp": {{HostIP: "", HostPort: "8545"}},
},
Env: []string{},
},
noRestart,
)
s.Require().NoError(err)
s.ethClient, err = ethclient.Dial(fmt.Sprintf("http://%s", s.ethResource.GetHostPort("8545/tcp")))
s.Require().NoError(err)
// Wait for the Ethereum node to start producing blocks; DAG completion takes
// about two minutes.
s.Require().Eventually(
func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
height, err := s.ethClient.BlockNumber(ctx)
if err != nil {
return false
}
return height > 1
},
5*time.Minute,
10*time.Second,
"geth node failed to produce a block",
)
s.T().Logf("started Ethereum container: %s", s.ethResource.Container.ID)
}
func (s *IntegrationTestSuite) runValidators() {
s.T().Log("starting Umee validator containers...")
s.valResources = make([]*dockertest.Resource, len(s.chain.validators))
for i, val := range s.chain.validators {
runOpts := &dockertest.RunOptions{
Name: val.instanceName(),
NetworkID: s.dkrNet.Network.ID,
Mounts: []string{
fmt.Sprintf("%s/:/root/.umee", val.configDir()),
},
Repository: "umeenet/peggo",
Entrypoint: []string{
"umeed",
"start",
},
}
// expose the first validator for debugging and communication
if val.index == 0 {
runOpts.PortBindings = map[docker.Port][]docker.PortBinding{
"1317/tcp": {{HostIP: "", HostPort: "1317"}},
"6060/tcp": {{HostIP: "", HostPort: "6060"}},
"6061/tcp": {{HostIP: "", HostPort: "6061"}},
"6062/tcp": {{HostIP: "", HostPort: "6062"}},
"6063/tcp": {{HostIP: "", HostPort: "6063"}},
"6064/tcp": {{HostIP: "", HostPort: "6064"}},
"6065/tcp": {{HostIP: "", HostPort: "6065"}},
"9090/tcp": {{HostIP: "", HostPort: "9090"}},
"26656/tcp": {{HostIP: "", HostPort: "26656"}},
"26657/tcp": {{HostIP: "", HostPort: "26657"}},
}
}
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
s.Require().NoError(err)
s.valResources[i] = resource
s.T().Logf("started Umee validator container: %s", resource.Container.ID)
}
rpcClient, err := rpchttp.New("tcp://localhost:26657", "/websocket")
s.Require().NoError(err)
s.Require().Eventually(
func() bool {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
status, err := rpcClient.Status(ctx)
if err != nil {
return false
}
// let the node produce a few blocks
if status.SyncInfo.CatchingUp || status.SyncInfo.LatestBlockHeight < 3 {
return false
}
return true
},
5*time.Minute,
time.Second,
"umee node failed to produce blocks",
)
}
func (s *IntegrationTestSuite) runContractDeployment() {
s.T().Log("starting Gravity Bridge contract deployer container...")
resource, err := s.dkrPool.RunWithOptions(
&dockertest.RunOptions{
Name: "gravity-contract-deployer",
NetworkID: s.dkrNet.Network.ID,
Repository: "umeenet/peggo",
// NOTE: container names are prefixed with '/'
Env: []string{"PEGGO_ETH_PK=" + ethMinerPK},
Entrypoint: []string{
"peggo",
"bridge",
"deploy-gravity",
"--eth-rpc",
fmt.Sprintf("http://%s:8545", s.ethResource.Container.Name[1:]),
"--cosmos-grpc",
fmt.Sprintf("tcp://%s:9090", s.valResources[0].Container.Name[1:]),
"--tendermint-rpc",
fmt.Sprintf("http://%s:26657", s.valResources[0].Container.Name[1:]),
},
},
noRestart,
)
s.Require().NoError(err)
s.T().Logf("started contract deployer: %s", resource.Container.ID)
// wait for the container to finish executing
container := resource.Container
for container.State.Running {
time.Sleep(10 * time.Second)
container, err = s.dkrPool.Client.InspectContainer(resource.Container.ID)
s.Require().NoError(err)
}
var (
outBuf bytes.Buffer
errBuf bytes.Buffer
)
s.Require().NoErrorf(s.dkrPool.Client.Logs(
docker.LogsOptions{
Container: resource.Container.ID,
OutputStream: &outBuf,
ErrorStream: &errBuf,
Stdout: true,
Stderr: true,
},
),
"failed to start contract deployer; stdout: %s, stderr: %s",
outBuf.String(), errBuf.String(),
)
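// peggo prints the deployed contract address and the deployment tx hash to stderr;
// scrape both from the captured container logs.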
re := regexp.MustCompile(`Address: (0x.+)`)
tokens := re.FindStringSubmatch(errBuf.String())
s.Require().Len(tokens, 2)
gravityContractAddr := tokens[1]
s.Require().NotEmpty(gravityContractAddr)
re = regexp.MustCompile(`Transaction: (0x.+)`)
tokens = re.FindStringSubmatch(errBuf.String())
s.Require().Len(tokens, 2)
txHash := tokens[1]
s.Require().NotEmpty(txHash)
s.Require().Eventually(
func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := queryEthTx(ctx, s.ethClient, txHash); err != nil {
return false
}
return true
},
time.Minute,
time.Second,
"failed to confirm Peggy contract deployment transaction",
)
s.Require().NoError(s.dkrPool.RemoveContainerByName(container.Name))
s.T().Logf("deployed Gravity Bridge contract: %s", gravityContractAddr)
s.gravityContractAddr = gravityContractAddr
}
func (s *IntegrationTestSuite) registerValidatorOrchAddresses() {
s.T().Log("registering Umee validator Ethereum keys...")
for i := range s.chain.validators {
s.registerOrchAddresses(i, "10photon")
}
}
func (s *IntegrationTestSuite) runOrchestrators() {
s.T().Log("starting orchestrator containers...")
s.orchResources = make([]*dockertest.Resource, len(s.chain.validators))
for i, orch := range s.chain.orchestrators {
resource, err := s.dkrPool.RunWithOptions(
&dockertest.RunOptions{
Name: s.chain.orchestrators[i].instanceName(),
NetworkID: s.dkrNet.Network.ID,
Repository: "umeenet/peggo",
Env: []string{
"PEGGO_ETH_PK=" + orch.ethereumKey.privateKey,
"PEGGO_COSMOS_PK=" + hexutil.Encode(s.chain.orchestrators[i].privateKey.Bytes()),
},
// NOTE: container names are prefixed with '/'
Entrypoint: []string{
"peggo",
"orchestrator",
s.gravityContractAddr,
"--eth-rpc",
fmt.Sprintf("http://%s:8545", s.ethResource.Container.Name[1:]),
"--cosmos-chain-id",
s.chain.id,
"--cosmos-grpc",
fmt.Sprintf("tcp://%s:9090", s.valResources[i].Container.Name[1:]),
"--tendermint-rpc",
fmt.Sprintf("http://%s:26657", s.valResources[i].Container.Name[1:]),
"--cosmos-gas-prices",
fmt.Sprintf("%s%s", minGasPrice, photonDenom),
"--cosmos-from",
s.chain.orchestrators[i].keyInfo.GetName(),
"--relay-batches=true",
"--valset-relay-mode=minimum",
"--profit-multiplier=0.0",
"--relayer-loop-multiplier=1.0",
"--requester-loop-multiplier=1.0",
},
},
noRestart,
)
s.Require().NoError(err)
s.orchResources[i] = resource
s.T().Logf("started orchestrator container: %s", resource.Container.ID)
}
match := "oracle sent set of claims successfully"
for _, resource := range s.orchResources {
s.T().Logf("waiting for orchestrator to be healthy: %s", resource.Container.ID)
var (
outBuf bytes.Buffer
errBuf bytes.Buffer
)
s.Require().Eventuallyf(
func() bool {
err := s.dkrPool.Client.Logs(
docker.LogsOptions{
Container: resource.Container.ID,
OutputStream: &outBuf,
ErrorStream: &errBuf,
Stdout: true,
Stderr: true,
},
)
if err != nil {
return false
}
return strings.Contains(errBuf.String(), match)
},
5*time.Minute,
time.Second,
"orchestrator %s not healthy",
resource.Container.ID,
)
}
}
func noRestart(config *docker.HostConfig) {
// in this case we don't want the nodes to restart on failure
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
| [
"\"PEGGO_E2E_USE_GANACHE\"",
"\"PEGGO_E2E_SKIP_CLEANUP\""
] | [] | [
"PEGGO_E2E_USE_GANACHE",
"PEGGO_E2E_SKIP_CLEANUP"
] | [] | ["PEGGO_E2E_USE_GANACHE", "PEGGO_E2E_SKIP_CLEANUP"] | go | 2 | 0 | |
selfdrive/car/car_helpers.py | import os
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_legacy_fingerprint_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from cereal import car
EventName = car.CarEvent.EventName
def get_startup_event(car_recognized, controller_available, fuzzy_fingerprint, fw_seen):
event = EventName.startup
if not car_recognized:
if fw_seen:
event = EventName.startupNoCar
else:
event = EventName.startupNoFw
elif car_recognized and not controller_available:
event = EventName.startupNoControl
elif car_recognized and fuzzy_fingerprint:
event = EventName.startupFuzzyFingerprint
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
# - keys are all the car names for which we have an interface
# - values are lists of specific car models for a given car
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if has_relay and not fixed_fingerprint and not skip_fw_query:
# VIN query only works reliably through OBD-II
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin != VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
exact_fw_match, fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
exact_fw_match, fw_candidates, car_fw = True, set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_legacy_fingerprint_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
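# Fingerprinting loop: record the payload length of every CAN message seen on each bus,
# then repeatedly eliminate candidate cars whose expected messages don't match, until a
# single candidate remains or the attempt times out.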
while not done:
a = get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs more time to fingerprint, since the DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = (all(len(cc) == 0 for cc in candidate_cars.values()) and frame > frame_fingerprint) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
exact_match = True
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
exact_match = exact_fw_match
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.event("fingerprinted", car_fingerprint=car_fingerprint,
source=source, fuzzy=not exact_match, fw_count=len(car_fw))
return car_fingerprint, finger, vin, car_fw, source, exact_match
def get_car(logcan, sendcan, has_relay=False):
candidate, fingerprints, vin, car_fw, source, exact_match = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "HYUNDAI NEXO"  # mock
selected_car = Params().get("SelectedCar")
if selected_car:
candidate = selected_car.decode("utf-8")
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
car_params.fuzzyFingerprint = not exact_match
return CarInterface(car_params, CarController, CarState), car_params
| [] | [] | [
"SKIP_FW_QUERY",
"FINGERPRINT"
] | [] | ["SKIP_FW_QUERY", "FINGERPRINT"] | python | 2 | 0 | |
command_cluster_edit.go | package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
// DoClusterEdit is an implementation of cluster edit command
func DoClusterEdit(c *cli.Context) {
args := c.Args()
if len(args) != 2 {
showHelpAndExit(c, "<cluster> and <file> must be specified")
}
cluster, err := NewCluster(args[0])
if err != nil {
log.WithField("err", err).Fatal("failed to get a cluster")
}
path := filepath.Join(cluster.Path(), args[1])
editor := os.Getenv("EDITOR")
if editor == "" {
log.Fatal("$EDITOR is not set")
}
cmd := exec.Command(editor, path)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Run()
}
// ClusterEditCompletion provides cli completion of cluster edit command
func ClusterEditCompletion(c *cli.Context) {
args := c.Args()
if len(args) == 0 {
for _, c := range AllClusters() {
fmt.Println(c.Name)
}
} else if len(args) == 1 {
cluster, err := NewCluster(args[0])
if err != nil {
os.Exit(1)
}
list, err := listDirectory(cluster.Path())
if err != nil {
os.Exit(1)
}
for _, e := range list {
fmt.Println(e)
}
}
}
func listDirectory(dir string) ([]string, error) {
fis, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
ret := make([]string, len(fis))
for i, fi := range fis {
ret[i] = fi.Name()
if fi.IsDir() {
ret[i] = ret[i] + "/"
}
}
return ret, nil
}
| [
"\"EDITOR\""
] | [] | [
"EDITOR"
] | [] | ["EDITOR"] | go | 1 | 0 | |
helper/path/basic.go | package path
import (
"errors"
"github.com/ehwjh2010/viper/constant"
"io/fs"
"os"
"path/filepath"
"strings"
)
var ErrPathAlreadyExist = errors.New("path already exists")
var ErrPathNoExist = errors.New("path does not exist")
var ErrInvalidPath = errors.New("invalid path")
// EnsurePathExist reports whether the given file or directory exists
func EnsurePathExist(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if errors.Is(err, fs.ErrNotExist) {
return false, nil
}
return false, err
}
// MakeDir creates a single directory; it does not create nested directories
// path the directory path
// existReturnError whether to return an error if the path already exists
func MakeDir(path string, existReturnError bool) (err error) {
if path == "" {
return ErrInvalidPath
}
exist, err := EnsurePathExist(path)
if err != nil {
return
}
if exist {
if existReturnError {
return ErrPathAlreadyExist
} else {
return
}
} else {
err = os.Mkdir(path, 0777)
}
return
}
// MakeDirIfNotPresent creates the directory if it does not exist and does nothing if it already exists
// path the directory path
func MakeDirIfNotPresent(path string) error {
return MakeDir(path, false)
}
// RemovePath removes a file or directory completely; for directories this includes all nested files and subdirectories
// path the path to remove
// noExistReturnError whether to return an error if the path does not exist
func RemovePath(path string, noExistReturnError bool) (bool, error) {
if path == "" {
return false, ErrInvalidPath
}
exists, err := EnsurePathExist(path)
if err != nil {
return false, err
}
if !exists {
if noExistReturnError {
return false, ErrPathNoExist
} else {
return true, nil
}
} else {
err := os.RemoveAll(path)
if err != nil {
return false, err
} else {
return true, nil
}
}
}
// JoinPath joins path elements into a single path
func JoinPath(paths ...string) string {
p := filepath.Join(paths...)
return p
}
// Relative2Abs converts a relative path to an absolute path
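// For example (illustrative): Relative2Abs("~/logs") expands "~" via $HOME and
// returns an absolute path such as "/home/user/logs".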
func Relative2Abs(relativePath string) (string, error) {
if relativePath == "" {
return "", nil
}
if strings.HasPrefix(relativePath, constant.HomeShortCut) {
home := os.Getenv("HOME")
relativePath = strings.Replace(relativePath, constant.HomeShortCut, home, 1)
}
absPath, err := filepath.Abs(relativePath)
if err != nil {
return "", err
} else {
return absPath, nil
}
}
// PathSplit splits a path and returns the directory and the file name
func PathSplit(path string) (string, string) {
dirName := filepath.Dir(path)
fileName := filepath.Base(path)
return dirName, fileName
}
// MakeDirs creates a directory along with any necessary parents
func MakeDirs(path ...string) error {
tmp := JoinPath(path...)
if tmp == "" {
return nil
}
return os.MkdirAll(tmp, os.ModePerm)
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
vendor/github.com/elastic/beats/metricbeat/module/mongodb/testing.go | package mongodb
import "os"
// Helper functions for testing used in the mongodb metricsets
// GetEnvHost returns the hostname of the Mongodb server to use for testing.
// It reads the value from the MONGODB_HOST environment variable and returns
// 127.0.0.1 if it is not set.
func GetEnvHost() string {
host := os.Getenv("MONGODB_HOST")
if len(host) == 0 {
host = "127.0.0.1"
}
return host
}
// GetEnvPort returns the port of the Mongodb server to use for testing.
// It reads the value from the MONGODB_PORT environment variable and returns
// 27017 if it is not set.
func GetEnvPort() string {
port := os.Getenv("MONGODB_PORT")
if len(port) == 0 {
port = "27017"
}
return port
}
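// Illustrative usage: metricset tests typically combine these helpers to build the
// module "hosts" entry, e.g. GetEnvHost() + ":" + GetEnvPort().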
| [
"\"MONGODB_HOST\"",
"\"MONGODB_PORT\""
] | [] | [
"MONGODB_HOST",
"MONGODB_PORT"
] | [] | ["MONGODB_HOST", "MONGODB_PORT"] | go | 2 | 0 | |
examples/service/messaging/short_code/delete/short_code_delete_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/messaging/v1"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var messagingClient *v1.Messaging
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
messagingClient = twilio.NewWithCredentials(creds).Messaging.V1
}
func main() {
err := messagingClient.
Service("MGXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
ShortCode("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Delete()
if err != nil {
log.Panicf("%s", err.Error())
}
log.Println("Short code resource deleted")
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] | [] | [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] | [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
run.py | from __future__ import print_function
import os
import sys
import time
import logging
import tempfile
import traceback
import subprocess
from shutil import disk_usage, rmtree
try:
import pathlib
import importlib.util
except ImportError:
pass
class GIT(object):
@classmethod
def works(cls):
try:
return bool(subprocess.check_output('git --version', shell=True))
except:
return False
class PIP(object):
@classmethod
def run(cls, command, check_output=False):
if not cls.works():
raise RuntimeError("Could not import pip.")
try:
return PIP.run_python_m(*command.split(), check_output=check_output)
except subprocess.CalledProcessError as e:
return e.returncode
except:
traceback.print_exc()
print("Error using -m method")
@classmethod
def run_python_m(cls, *args, **kwargs):
check_output = kwargs.pop('check_output', False)
check = subprocess.check_output if check_output else subprocess.check_call
return check([sys.executable, '-m', 'pip'] + list(args))
@classmethod
def run_pip_main(cls, *args, **kwargs):
import pip
args = list(args)
check_output = kwargs.pop('check_output', False)
if check_output:
from io import StringIO
out = StringIO()
sys.stdout = out
try:
pip.main(args)
except:
traceback.print_exc()
finally:
sys.stdout = sys.__stdout__
out.seek(0)
pipdata = out.read()
out.close()
print(pipdata)
return pipdata
else:
return pip.main(args)
@classmethod
def run_install(cls, cmd, quiet=False, check_output=False):
return cls.run("install %s%s" % ('-q ' if quiet else '', cmd), check_output)
@classmethod
def run_show(cls, cmd, check_output=False):
return cls.run("show %s" % cmd, check_output)
@classmethod
def works(cls):
try:
import pip
return True
except ImportError:
return False
# noinspection PyTypeChecker
@classmethod
def get_module_version(cls, mod):
try:
out = cls.run_show(mod, check_output=True)
if isinstance(out, bytes):
out = out.decode()
datas = out.replace('\r\n', '\n').split('\n')
expectedversion = datas[3]
if expectedversion.startswith('Version: '):
return expectedversion.split()[1]
else:
return [x.split()[1] for x in datas if x.startswith("Version: ")][0]
except:
pass
@classmethod
def get_requirements(cls, file='requirements.txt'):
from pip.req import parse_requirements
return list(parse_requirements(file))
# Setup initial loggers
tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
log = logging.getLogger('launcher')
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(logging.Formatter(
fmt="[%(levelname)s] %(name)s: %(message)s"
))
sh.setLevel(logging.INFO)
log.addHandler(sh)
tfh = logging.StreamHandler(stream=tmpfile)
tfh.setFormatter(logging.Formatter(
fmt="[%(relativeCreated).9f] %(asctime)s - %(levelname)s - %(name)s: %(message)s"
))
tfh.setLevel(logging.DEBUG)
log.addHandler(tfh)
def finalize_logging():
if os.path.isfile("logs/musicbot.log"):
log.info("Moving old musicbot log")
try:
if os.path.isfile("logs/musicbot.log.last"):
os.unlink("logs/musicbot.log.last")
os.rename("logs/musicbot.log", "logs/musicbot.log.last")
except:
pass
with open("logs/musicbot.log", 'w', encoding='utf8') as f:
tmpfile.seek(0)
f.write(tmpfile.read())
tmpfile.close()
f.write('\n')
f.write(" PRE-RUN SANITY CHECKS PASSED ".center(80, '#'))
f.write('\n\n')
global tfh
log.removeHandler(tfh)
del tfh
fh = logging.FileHandler("logs/musicbot.log", mode='a')
fh.setFormatter(logging.Formatter(
fmt="[%(relativeCreated).9f] %(name)s-%(levelname)s: %(message)s"
))
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
sh.setLevel(logging.INFO)
dlog = logging.getLogger('discord')
dlh = logging.StreamHandler(stream=sys.stdout)
dlh.terminator = ''
dlh.setFormatter(logging.Formatter('.'))
dlog.addHandler(dlh)
def bugger_off(msg="Press enter to continue . . .", code=1):
input(msg)
sys.exit(code)
# TODO: all of this
def sanity_checks(optional=True):
log.info("Starting sanity checks")
## Required
# Make sure we're on Python 3.5+
req_ensure_py3()
# Fix windows encoding fuckery
req_ensure_encoding()
# Make sure we're in a writeable env
req_ensure_env()
# Make our folders if needed
req_ensure_folders()
log.info("Required checks passed.")
## Optional
if not optional:
return
# Check disk usage
opt_check_disk_space()
log.info("Optional checks passed.")
def req_ensure_py3():
log.info("Checking for Python 3.5+")
if sys.version_info < (3, 5):
log.warning("Python 3.5+ is required. This version is %s", sys.version.split()[0])
log.warning("Attempting to locate Python 3.5...")
pycom = None
if sys.platform.startswith('win'):
log.info('Trying "py -3.5"')
try:
subprocess.check_output('py -3.5 -c "exit()"', shell=True)
pycom = 'py -3.5'
except:
log.info('Trying "python3"')
try:
subprocess.check_output('python3 -c "exit()"', shell=True)
pycom = 'python3'
except:
pass
if pycom:
log.info("Python 3 found. Launching bot...")
pyexec(pycom, 'run.py')
# I hope ^ works
os.system('start cmd /k %s run.py' % pycom)
sys.exit(0)
else:
log.info('Trying "python3.5"')
try:
pycom = subprocess.check_output('python3.5 -c "exit()"'.split()).strip().decode()
except:
pass
if pycom:
log.info("\nPython 3 found. Re-launching bot using: %s run.py\n", pycom)
pyexec(pycom, 'run.py')
log.critical("Could not find Python 3.5 or higher. Please run the bot using Python 3.5")
bugger_off()
def req_ensure_encoding():
log.info("Checking console encoding")
if sys.platform.startswith('win') or sys.stdout.encoding.replace('-', '').lower() != 'utf8':
log.info("Setting console encoding to UTF-8")
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf8', line_buffering=True)
# only slightly evil
sys.__stdout__ = sh.stream = sys.stdout
if os.environ.get('PYCHARM_HOSTED', None) not in (None, '0'):
log.info("Enabling colors in pycharm pseudoconsole")
sys.stdout.isatty = lambda: True
def req_ensure_env():
log.info("Ensuring we're in the right environment")
try:
assert os.path.isdir('config'), 'folder "config" not found'
assert os.path.isdir('musicbot'), 'folder "musicbot" not found'
assert os.path.isdir('.git'), 'bot was not installed using Git. If you downloaded a ZIP, you did it wrong. Open http://bit.ly/dmbguide on your browser for official install steps.'
assert os.path.isfile('musicbot/__init__.py'), 'musicbot folder is not a Python module'
assert importlib.util.find_spec('musicbot'), "musicbot module is not importable"
except AssertionError as e:
log.critical("Failed environment check, %s", e)
bugger_off()
try:
os.mkdir('musicbot-test-folder')
except Exception:
log.critical("Current working directory does not seem to be writable")
log.critical("Please move the bot to a folder that is writable")
bugger_off()
finally:
rmtree('musicbot-test-folder', True)
if sys.platform.startswith('win'):
log.info("Adding local bins/ folder to path")
os.environ['PATH'] += ';' + os.path.abspath('bin/')
sys.path.append(os.path.abspath('bin/')) # might as well
def req_ensure_folders():
pathlib.Path('logs').mkdir(exist_ok=True)
pathlib.Path('data').mkdir(exist_ok=True)
def opt_check_disk_space(warnlimit_mb=200):
if disk_usage('.').free < warnlimit_mb*1024*1024:
log.warning("Less than %sMB of free space remains on this device" % warnlimit_mb)
#################################################
def pyexec(pycom, *args, pycom2=None):
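# Replaces the current process image via os.execlp and never returns; pycom2 becomes argv[0].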
pycom2 = pycom2 or pycom
os.execlp(pycom, pycom2, *args)
def restart(*args):
pyexec(sys.executable, *args, *sys.argv, pycom2='python')
def main():
# TODO: *actual* argparsing
if '--no-checks' not in sys.argv:
sanity_checks()
finalize_logging()
import asyncio
tried_requirementstxt = False
tryagain = True
loops = 0
max_wait_time = 60
while tryagain:
# Maybe I need to try to import stuff first, then actually import stuff
# It'd save me a lot of pain with all that awful exception type checking
m = None
try:
from musicbot import MusicBot
m = MusicBot()
sh.terminator = ''
log.info("Connecting")
sh.terminator = '\n'
m.run()
except SyntaxError:
log.exception("Syntax error (this is a bug, not your fault)")
break
except ImportError:
# TODO: if error module is in pip or dpy requirements...
if not tried_requirementstxt:
tried_requirementstxt = True
log.exception("Error starting bot")
log.info("Attempting to install dependencies...")
err = PIP.run_install('--upgrade -r requirements.txt')
if err: # TODO: add the specific error check back as not to always tell users to sudo it
print()
log.critical("You may need to %s to install dependencies." %
['use sudo', 'run as admin'][sys.platform.startswith('win')])
break
else:
print()
log.info("Ok lets hope it worked")
print()
else:
log.exception("Unknown ImportError, exiting.")
break
except Exception as e:
if hasattr(e, '__module__') and e.__module__ == 'musicbot.exceptions':
if e.__class__.__name__ == 'HelpfulError':
log.info(e.message)
break
elif e.__class__.__name__ == "TerminateSignal":
break
elif e.__class__.__name__ == "RestartSignal":
restart()
else:
log.exception("Error starting bot")
finally:
if not m or not m.init_ok:
if any(sys.exc_info()):
# How to log this without redundant messages...
traceback.print_exc()
break
asyncio.set_event_loop(asyncio.new_event_loop())
loops += 1
sleeptime = min(loops * 2, max_wait_time)
if sleeptime:
log.info("Restarting in {} seconds...".format(sleeptime))
time.sleep(sleeptime)
print()
log.info("All done.")
if __name__ == '__main__':
main()
| [] | [] | [
"PYCHARM_HOSTED",
"PATH"
] | [] | ["PYCHARM_HOSTED", "PATH"] | python | 2 | 0 | |
src/main/java/com/example/openshift/automounter/App.java | package com.example.openshift.automounter;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
import io.fabric8.kubernetes.api.model.Event;
import io.fabric8.kubernetes.api.model.EventBuilder;
import io.fabric8.kubernetes.api.model.ObjectReferenceBuilder;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimVolumeSource;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaimVolumeSourceBuilder;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.Volume;
import io.fabric8.kubernetes.api.model.VolumeBuilder;
import io.fabric8.kubernetes.api.model.VolumeMount;
import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
import io.fabric8.openshift.api.model.DeploymentConfig;
import io.fabric8.openshift.api.model.DeploymentConfigBuilder;
import io.fabric8.openshift.api.model.DeploymentConfigList;
import io.fabric8.openshift.client.DefaultOpenShiftClient;
import io.fabric8.openshift.client.OpenShiftClient;
import io.fabric8.openshift.client.OpenShiftConfig;
import io.fabric8.openshift.client.OpenShiftConfigBuilder;
public class App {
private static volatile boolean running = true;
public static void main(String[] args) throws InterruptedException {
String masterUrl = "https://openshift.default.svc";
if (System.getenv("AM_MASTER_URL") != null) {
masterUrl = System.getenv("AM_MASTER_URL");
}
final String NAMESPACE = System.getenv("AM_NAMESPACE");
final String EXPECTED_POD_NAME = System.getenv("HOSTNAME");
OpenShiftConfig config = new OpenShiftConfigBuilder()//
.withMasterUrl(masterUrl)//
.withNamespace(NAMESPACE)//
.withOauthToken(System.getenv("AM_TOKEN"))//
.build();
try (OpenShiftClient client = new DefaultOpenShiftClient(config)) {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
Event event = new EventBuilder()//
.withNewMetadata().withGenerateName("automount-")//
.endMetadata()//
.withType("Normal")//
.withLastTimestamp(now())//
.withInvolvedObject(new ObjectReferenceBuilder()//
.withNamespace(NAMESPACE)//
.withKind("Pod")//
.withName(EXPECTED_POD_NAME).build())//
.withReason("Exiting")//
.withMessage("Shutting down")//
.build();
client.events().create(event);
running = false;
}
});
Event event = new EventBuilder()//
.withNewMetadata().withGenerateName("automount-")//
.endMetadata()//
.withType("Normal")//
.withLastTimestamp(now())//
.withInvolvedObject(new ObjectReferenceBuilder()//
.withNamespace(NAMESPACE)//
.withKind("Pod")//
.withName(EXPECTED_POD_NAME).build())//
.withReason("Started")//
.withMessage("Watching for automount requests")//
.build();
client.events().create(event);
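// Reconcile loop: for every DeploymentConfig labelled automount=true, ensure a matching
// PVC exists, wait for it to become Bound, then patch the DC to mount the claim at
// /automounter in its first container.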
while (running) {
DeploymentConfigList dcs = client.deploymentConfigs().withLabel("automount", "true").list();
for (DeploymentConfig dc : dcs.getItems()) {
String name = dc.getMetadata().getName();
System.out.print("found: dc/" + name);
List<Volume> volumes = dc.getSpec().getTemplate().getSpec().getVolumes();
boolean unmounted = volumes.stream().filter(p -> "automount".equals(p.getName()))
.collect(Collectors.toList()).isEmpty();
if (unmounted) {
System.out.print(" - volume unmounted");
List<PersistentVolumeClaim> pvcs = client.persistentVolumeClaims().withLabel("automount", name)
.list().getItems();
boolean undefined = pvcs.isEmpty();
if (undefined) {
System.out.print(" - pvc undefined");
PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()//
.withNewMetadata()//
.addToAnnotations("example.com/generated-by", "automounter")//
.addToLabels("automount", name)//
.withGenerateName("automount-" + name + "-")//
.and()//
.withNewSpec()//
.withAccessModes("ReadWriteMany")//
.withNewResources()//
.withRequests(Collections.singletonMap("storage", new Quantity("10m")))//
.endResources()//
.endSpec()//
.build();
client.persistentVolumeClaims().create(pvc);
} else {
System.out.print(" - pvc defined");
PersistentVolumeClaim pvc = pvcs.get(0);
if ("Bound".equals(pvc.getStatus().getPhase())) {
System.out.print(" - pvc bound");
VolumeMount volumeMount = new VolumeMountBuilder()//
.withMountPath("/automounter")//
.withName("automount")//
.build();
PersistentVolumeClaimVolumeSource pvcSource = new PersistentVolumeClaimVolumeSourceBuilder()//
.withClaimName(pvc.getMetadata().getName())//
.build();
Volume volume = new VolumeBuilder()//
.withName("automount")//
.withPersistentVolumeClaim(pvcSource)//
.build();
DeploymentConfig patchedDc = new DeploymentConfigBuilder(dc)//
.editSpec()//
.editTemplate()//
.editSpec()//
.addToVolumes(volume)//
.editFirstContainer()//
.addNewVolumeMountLike(volumeMount)//
.endVolumeMount()//
.endContainer()//
.endSpec()//
.endTemplate()//
.endSpec()//
.build();//
client.deploymentConfigs().createOrReplace(patchedDc);
} else {
System.out.print(" - pvc unbound (" + pvc.getStatus().getPhase() + ")");
}
}
} else {
System.out.println(" is as requested");
// nothing to do
}
}
Thread.sleep(5000);
}
}
System.out.println("exiting");
}
private static String now() {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX").format(new Date());
}
}
| [
"\"AM_MASTER_URL\"",
"\"AM_MASTER_URL\"",
"\"AM_NAMESPACE\"",
"\"HOSTNAME\"",
"\"AM_TOKEN\""
] | [] | [
"AM_NAMESPACE",
"AM_TOKEN",
"AM_MASTER_URL",
"HOSTNAME"
] | [] | ["AM_NAMESPACE", "AM_TOKEN", "AM_MASTER_URL", "HOSTNAME"] | java | 4 | 0 | |
dao/mongo/registry.go | package mongo
import (
"time"
"github.com/cuigh/swirl/model"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
)
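// The methods below are thin CRUD helpers for the "registry" collection; each one runs
// through d.do, which hands the callback an open database handle.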
func (d *Dao) RegistryCreate(registry *model.Registry) (err error) {
d.do(func(db *database) {
err = db.C("registry").Insert(registry)
})
return
}
func (d *Dao) RegistryUpdate(registry *model.Registry) (err error) {
d.do(func(db *database) {
update := bson.M{
"name": registry.Name,
"url": registry.URL,
"username": registry.Username,
"updated_at": time.Now(),
}
if registry.Password != "" {
update["password"] = registry.Password
}
err = db.C("registry").UpdateId(registry.ID, bson.M{"$set": update})
})
return
}
func (d *Dao) RegistryList() (registries []*model.Registry, err error) {
d.do(func(db *database) {
registries = []*model.Registry{}
err = db.C("registry").Find(nil).All(®istries)
})
return
}
func (d *Dao) RegistryGet(id string) (registry *model.Registry, err error) {
d.do(func(db *database) {
registry = &model.Registry{}
err = db.C("registry").FindId(id).One(registry)
if err == mgo.ErrNotFound {
err = nil
} else if err != nil {
registry = nil
}
})
return
}
func (d *Dao) RegistryDelete(id string) (err error) {
d.do(func(db *database) {
err = db.C("registry").RemoveId(id)
})
return
}
| [] | [] | [] | [] | [] | go | null | null | null |
pkg/clients/clients.go | package clients
import (
"context"
"flag"
"os"
"time"
ocpv1 "github.com/openshift/api/config/v1"
cfgclientset "github.com/openshift/client-go/config/clientset/versioned"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
)
type Interface interface {
GetInfrastructure() (*ocpv1.Infrastructure, error)
GetConfigMap(namespace, name string) (*v1.ConfigMap, error)
GetSecret(namespace, name string) (*v1.Secret, error)
ListNodes() ([]v1.Node, error)
ListStorageClasses() ([]storagev1.StorageClass, error)
ListPVs() ([]v1.PersistentVolume, error)
}
type clients struct {
// Kubernetes API client
KubeClient kubernetes.Interface
// config.openshift.io client
ConfigClient cfgclientset.Interface
}
var _ Interface = &clients{}
var (
Timeout = flag.Duration("kubernetes-timeout", 10*time.Second, "Timeout of all Kubernetes calls")
)
func Create() (Interface, error) {
var kubeconfig string
// get the KUBECONFIG from env if specified (useful for local/debug cluster)
kubeconfigEnv := os.Getenv("KUBECONFIG")
if kubeconfigEnv != "" {
klog.V(2).Infof("Using KUBECONFIG environment variable to connect")
kubeconfig = kubeconfigEnv
}
var config *rest.Config
var err error
if kubeconfig != "" {
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
} else {
klog.Infof("Building kube configs for running in cluster...")
config, err = rest.InClusterConfig()
}
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cfgClient, err := cfgclientset.NewForConfig(config)
if err != nil {
return nil, err
}
return &clients{
KubeClient: kubeClient,
ConfigClient: cfgClient,
}, nil
}
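// Illustrative usage:
//   c, err := clients.Create()
//   if err != nil { /* handle error */ }
//   infra, err := c.GetInfrastructure()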
func (c *clients) GetInfrastructure() (*ocpv1.Infrastructure, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
return c.ConfigClient.ConfigV1().Infrastructures().Get(ctx, "cluster", metav1.GetOptions{})
}
func (c *clients) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
return c.KubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
}
func (c *clients) GetSecret(namespace, name string) (*v1.Secret, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
return c.KubeClient.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
}
func (c *clients) ListNodes() ([]v1.Node, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
list, err := c.KubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return list.Items, nil
}
func (c *clients) ListStorageClasses() ([]storagev1.StorageClass, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
list, err := c.KubeClient.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return list.Items, nil
}
func (c *clients) ListPVs() ([]v1.PersistentVolume, error) {
ctx, cancel := context.WithTimeout(context.Background(), *Timeout)
defer cancel()
list, err := c.KubeClient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return list.Items, nil
}
| [
"\"KUBECONFIG\""
] | [] | [
"KUBECONFIG"
] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
examples/FasterRCNN/config.py | # -*- coding: utf-8 -*-
# File: config.py
import numpy as np
import os
import pprint
import six
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
__all__ = ['config', 'finalize_configs']
class AttrDict():
_freezed = False
""" Avoid accidental creation of new hierarchies. """
def __getattr__(self, name):
if self._freezed:
raise AttributeError(name)
if name.startswith('_'):
# Do not mess with internals. Otherwise copy/pickle will fail
raise AttributeError(name)
ret = AttrDict()
setattr(self, name, ret)
return ret
def __setattr__(self, name, value):
if self._freezed and name not in self.__dict__:
raise AttributeError(
"Config was freezed! Unknown config: {}".format(name))
super().__setattr__(name, value)
def __str__(self):
return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)
__repr__ = __str__
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')}
def from_dict(self, d):
self.freeze(False)
for k, v in d.items():
self_v = getattr(self, k)
if isinstance(self_v, AttrDict):
self_v.from_dict(v)
else:
setattr(self, k, v)
def update_args(self, args):
"""Update from command line args. """
for cfg in args:
keys, v = cfg.split('=', maxsplit=1)
keylist = keys.split('.')
dic = self
for k in keylist[:-1]:
assert k in dir(dic), "Unknown config key: {}".format(keys)
dic = getattr(dic, k)
key = keylist[-1]
oldv = getattr(dic, key)
if not isinstance(oldv, str):
v = eval(v)
setattr(dic, key, v)
def freeze(self, freezed=True):
self._freezed = freezed
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.freeze(freezed)
# avoid silent bugs
def __eq__(self, _):
raise NotImplementedError()
def __ne__(self, _):
raise NotImplementedError()
config = AttrDict()
_C = config # short alias to save typing
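# Example override via update_args (illustrative values; the accompanying train.py is
# assumed to pass its --config KEY=VALUE pairs here):
#   config.update_args(["TRAIN.NUM_GPUS=8", "MODE_MASK=False"])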
# mode flags ---------------------
_C.TRAINER = 'replicated' # options: 'horovod', 'replicated'
_C.MODE_MASK = True # Faster R-CNN or Mask R-CNN
_C.MODE_FPN = True
# dataset -----------------------
_C.DATA.BASEDIR = '/path/to/your/DATA/DIR'
# All available dataset names are defined in `dataset/coco.py:register_coco`.
# All TRAIN dataset will be concatenated for training.
_C.DATA.TRAIN = ('coco_train2017',) # i.e. trainval35k
# Each VAL dataset will be evaluated separately (instead of concatenated)
_C.DATA.VAL = ('coco_val2017',) # AKA minival2014
# These two configs will be populated later inside `finalize_configs`.
_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)
_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
# whether the coordinates in your registered dataset are
# absolute pixel values in range [0, W or H] or relative values in [0, 1]
_C.DATA.ABSOLUTE_COORD = True
# Filter Negative Samples from dataset
_C.DATA.FILTER_EMPTY_ANNOTATIONS = True
# Number of data loading workers.
# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).
# Set to 0 to disable parallel data loading
_C.DATA.NUM_WORKERS = 10
# backbone ----------------------
_C.BACKBONE.WEIGHTS = ''
# To train from scratch, set it to empty, and set FREEZE_AT to 0
# To train from ImageNet pre-trained models, use the one that matches your
# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.
# To train from an existing COCO model, use the path to that file, and change
# the other configurations according to that model.
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3] # for resnet50
# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101
_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers
_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None
_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)
# Use a base model with TF-preferred padding mode,
# which may pad more pixels on right/bottom than top/left.
# See https://github.com/tensorflow/tensorflow/issues/18213
# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.
# Using either one should probably give the same performance.
# We use the "AlignPadding" one just to be consistent with caffe2.
_C.BACKBONE.TF_PAD_MODE = False
_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models
# schedule -----------------------
_C.TRAIN.NUM_GPUS = None # by default, will be set from code
_C.TRAIN.WEIGHT_DECAY = 1e-4
_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs
_C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.STEPS_PER_EPOCH = 500
_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
# It can be either a string like "3x" that refers to standard convention, or a list of int.
# LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which
# means to decrease LR at steps 420k and 500k and stop training at 540k.
# When the total bs!=8, the actual iterations to decrease learning rate, and
# the base learning rate are computed from BASE_LR and LR_SCHEDULE.
# Therefore, there is *no need* to modify the config if you only change the number of GPUs.
_C.TRAIN.LR_SCHEDULE = "1x" # "1x" schedule in detectron
_C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation
_C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model
# preprocessing --------------------
# Alternative old (worse & faster) setting: 600
_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [800, 800] # [min, max] to sample from
_C.PREPROC.TEST_SHORT_EDGE_SIZE = 800
_C.PREPROC.MAX_SIZE = 1333
# mean and std in RGB order.
# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]
_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]
# anchors -------------------------
_C.RPN.ANCHOR_STRIDE = 16
_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)
_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7
_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3
# rpn training -------------------------
_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors
_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
_C.RPN.MIN_SIZE = 0
_C.RPN.PROPOSAL_NMS_THRESH = 0.7
# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.
# Setting this to a value larger than 1.0 will disable the feature.
# It is disabled by default because Detectron does not do this.
_C.RPN.CROWD_OVERLAP_THRESH = 9.99
_C.RPN.HEAD_DIM = 1024 # used in C4 only
# RPN proposal selection -------------------------------
# for C4
_C.RPN.TRAIN_PRE_NMS_TOPK = 12000
_C.RPN.TRAIN_POST_NMS_TOPK = 2000
_C.RPN.TEST_PRE_NMS_TOPK = 6000
_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number
# for FPN, #proposals per-level and #proposals after merging are (for now) the same
# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect
_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000
_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000
# fastrcnn training ---------------------
_C.FRCNN.BATCH_PER_IM = 512
_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10
_C.FRCNN.FG_THRESH = 0.5
_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch
# FPN -------------------------
_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'
_C.FPN.NUM_CHANNEL = 256
_C.FPN.NORM = 'None' # 'None', 'GN'
# The head option is only used in FPN. For C4 models, the head is C5
_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'
# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head
_C.FPN.FRCNN_CONV_HEAD_DIM = 256
_C.FPN.FRCNN_FC_HEAD_DIM = 1024
_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head
# Mask R-CNN
_C.MRCNN.HEAD_DIM = 256
_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy
# Cascade R-CNN, only available in FPN mode
_C.FPN.CASCADE = False
_C.CASCADE.IOUS = [0.5, 0.6, 0.7]
_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
# testing -----------------------
_C.TEST.FRCNN_NMS_THRESH = 0.5
# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.
# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa
_C.TEST.RESULT_SCORE_THRESH = 0.05
_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results
_C.TEST.RESULTS_PER_IM = 100
_C.freeze() # avoid typo / wrong config keys
def finalize_configs(is_training):
"""
Run some sanity checks, and populate some configs from others
"""
_C.freeze(False) # populate new keys now
if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well
_C.DATA.VAL = (_C.DATA.VAL, )
if isinstance(_C.DATA.TRAIN, six.string_types): # support single string
_C.DATA.TRAIN = (_C.DATA.TRAIN, )
# finalize dataset definitions ...
from dataset import DatasetRegistry
datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
_C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
_C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1
assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
if _C.BACKBONE.NORM != 'FreezeBN':
assert not _C.BACKBONE.FREEZE_AFFINE
assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]
_C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
# image size into the backbone has to be multiple of this number
_C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5
if _C.MODE_FPN:
size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.NORM in ['None', 'GN']
if _C.FPN.CASCADE:
# the first threshold is the proposal sampling threshold
assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)
if is_training:
train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
# don't autotune if augmentation is on
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
lr = _C.TRAIN.LR_SCHEDULE
if isinstance(lr, six.string_types):
if lr.endswith("x"):
LR_SCHEDULE_KITER = {
"{}x".format(k):
[180 * k - 120, 180 * k - 40, 180 * k]
for k in range(2, 10)}
LR_SCHEDULE_KITER["1x"] = [120, 160, 180]
_C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
else:
_C.TRAIN.LR_SCHEDULE = eval(lr)
# setup NUM_GPUS
if _C.TRAINER == 'horovod':
import horovod.tensorflow as hvd
ngpu = hvd.size()
logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
hvd.rank(), hvd.size(), hvd.local_rank()))
else:
assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
ngpu = get_num_gpu()
assert ngpu > 0, "Has to train with GPU!"
assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
else:
# autotune is too slow for inference
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
ngpu = get_num_gpu()
if _C.TRAIN.NUM_GPUS is None:
_C.TRAIN.NUM_GPUS = ngpu
else:
if _C.TRAINER == 'horovod':
assert _C.TRAIN.NUM_GPUS == ngpu
else:
assert _C.TRAIN.NUM_GPUS <= ngpu
_C.freeze()
logger.info("Config: ------------------------------------------\n" + str(_C))
| [] | [] | [
"TF_AUTOTUNE_THRESHOLD",
"TF_CUDNN_USE_AUTOTUNE"
] | [] | ["TF_AUTOTUNE_THRESHOLD", "TF_CUDNN_USE_AUTOTUNE"] | python | 2 | 0 | |
doc/conf.py | # -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
from distutils.version import LooseVersion
import gc
import os
import os.path as op
import sys
import warnings
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import matplotlib
import mne
from mne.utils import linkcode_resolve # noqa, analysis:ignore
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
'%s' % (sphinx_gallery.__version__,))
matplotlib.use('agg')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
if not os.path.isdir('_images'):
os.mkdir('_images')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'sphinx_gallery.gen_gallery',
'sphinx_fontawesome',
'numpydoc',
'gen_commands',
'sphinx_bootstrap_theme',
'sphinx_bootstrap_divs',
]
linkcheck_ignore = [
'https://doi.org/10.1088/0031-9155/57/7/1937', # 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta
'https://sccn.ucsd.edu/wiki/.*', # HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),))
'https://docs.python.org/dev/howto/logging.html', # ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://docs.python.org/3/library/.*', # ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442/', # Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False # saves a bit of time
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
nitpicky = True
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "autolink"
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ', # we replace this with an image
'source_link_position': "nav", # default
'bootswatch_theme': "flatly", # yeti paper lumen
'navbar_sidebarrel': False, # Render the next/prev links in navbar?
'navbar_pagenav': False,
'navbar_class': "navbar",
'bootstrap_version': "3", # default
'navbar_links': [
("Install", "install/index"),
("Documentation", "documentation"),
("API", "python_reference"),
("Glossary", "glossary"),
("Examples", "auto_examples/index"),
("Tutorials", "auto_tutorials/index"),
("Contribute", "install/contributing"),
],
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://www.numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'numba': ('https://numba.pydata.org/numba-doc/latest', None),
'joblib': ('https://joblib.readthedocs.io/en/latest', None),
'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
'nibabel': ('https://nipy.org/nibabel', None),
'nilearn': ('http://nilearn.github.io', None),
'surfer': ('https://pysurfer.github.io/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'statsmodels': ('http://www.statsmodels.org/stable/', None),
'dipy': ('http://nipy.org/dipy', None),
'mne_realtime': ('https://mne-tools.github.io/mne-realtime', None),
'picard': ('https://pierreablin.github.io/picard/', None),
}
##############################################################################
# sphinx-gallery
examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'auto_tutorials']
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
mlab = mne.utils._import_mlab()
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
# hack to initialize the Mayavi Engine
mlab.test_plot3d()
mlab.close()
except Exception:
pass
else:
scrapers += ('mayavi',)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
pyvista.OFF_SCREEN = True
except Exception:
pass
else:
scrapers += ('pyvista',)
if any(x in scrapers for x in ('pyvista', 'mayavi')):
from traits.api import push_exception_handler
push_exception_handler(reraise_exceptions=True)
report_scraper = mne.report._ReportScraper()
scrapers += (report_scraper,)
def setup(app):
if report_scraper is not None:
report_scraper.app = app
app.connect('build-finished', report_scraper.copyfiles)
class Resetter(object):
"""Simple class to make the str(obj) static for Sphinx build env hash."""
def __repr__(self):
return '<%s>' % (self.__class__.__name__,)
def __call__(self, gallery_conf, fname):
import matplotlib.pyplot as plt
reset_warnings(gallery_conf, fname)
# in case users have interactive mode turned on in matplotlibrc,
# turn it off here (otherwise the build can be very slow)
plt.ioff()
gc.collect()
def reset_warnings(gallery_conf, fname):
"""Ensure we are future compatible and ignore silly warnings."""
# In principle, our examples should produce no warnings.
# Here we cause warnings to become errors, with a few exceptions.
# This list should be considered alongside
# setup.cfg -> [tool:pytest] -> filterwarnings
# remove tweaks from other module imports or example runs
warnings.resetwarnings()
# restrict
warnings.filterwarnings('error')
# allow these, but show them
warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
warnings.filterwarnings('always', '.*cannot make axes width small.*')
warnings.filterwarnings('always', '.*Axes that are not compatible.*')
warnings.filterwarnings('always', '.*FastICA did not converge.*')
warnings.filterwarnings( # xhemi morph (should probably update sample)
'always', '.*does not exist, creating it and saving it.*')
warnings.filterwarnings('default', module='sphinx') # internal warnings
warnings.filterwarnings(
'always', '.*converting a masked element to nan.*') # matplotlib?
# allow these warnings, but don't show them
warnings.filterwarnings(
'ignore', '.*OpenSSL\\.rand is deprecated.*')
warnings.filterwarnings('ignore', '.*is currently using agg.*')
warnings.filterwarnings( # SciPy-related warning (maybe 1.2.0 will fix it)
'ignore', '.*the matrix subclass is not the recommended.*')
warnings.filterwarnings( # some joblib warning
'ignore', '.*semaphore_tracker: process died unexpectedly.*')
warnings.filterwarnings( # needed until SciPy 1.2.0 is released
'ignore', '.*will be interpreted as an array index.*', module='scipy')
for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
'Using or importing the ABCs from', # internal modules on 3.7
r"it will be an error for 'np\.bool_'", # ndimage
"DocumenterBridge requires a state object", # sphinx dev
"'U' mode is deprecated", # sphinx io
r"joblib is deprecated in 0\.21", # nilearn
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*%s.*" % key, category=DeprecationWarning)
warnings.filterwarnings( # deal with bootstrap-theme bug
'ignore', message=".*modify script_files in the theme.*",
category=Warning)
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
warnings.filterwarnings( # realtime
'ignore', message=".*unclosed file.*", category=ResourceWarning)
warnings.filterwarnings('ignore', message='Exception ignored in.*')
# allow this ImportWarning, but don't show it
warnings.filterwarnings(
'ignore', message="can't resolve package from", category=ImportWarning)
warnings.filterwarnings(
'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
reset_warnings(None, None)
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': dict(mne=None),
'examples_dirs': examples_dirs,
'subsection_order': ExplicitOrder(['../examples/io/',
'../examples/simulation/',
'../examples/preprocessing/',
'../examples/visualization/',
'../examples/time_frequency/',
'../examples/stats/',
'../examples/decoding/',
'../examples/connectivity/',
'../examples/forward/',
'../examples/inverse/',
'../examples/realtime/',
'../examples/datasets/',
'../tutorials/intro/',
'../tutorials/raw/',
'../tutorials/preprocessing/',
'../tutorials/epochs/',
'../tutorials/evoked/',
'../tutorials/time-freq/',
'../tutorials/source-modeling/',
'../tutorials/stats-sensor-space/',
'../tutorials/stats-source-space/',
'../tutorials/machine-learning/',
'../tutorials/simulation/',
'../tutorials/sample-datasets/',
'../tutorials/discussions/',
'../tutorials/misc/']),
'gallery_dirs': gallery_dirs,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning
'download_section_examples': False,
'thumbnail_size': (160, 112),
'min_reported_time': 1.,
'abort_on_example_error': False,
'reset_modules': ('matplotlib', Resetter()), # called w/each script
'image_scrapers': scrapers,
'show_memory': True,
'line_numbers': False, # XXX currently (0.3.dev0) messes with style
'within_subsection_order': FileNameSortKey,
'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
}
##############################################################################
# numpydoc
# XXX This hack defines what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = False
numpydoc_xref_param_type = True
numpydoc_xref_aliases = {
'Popen': 'python:subprocess.Popen',
'file-like': ':term:`file-like <python:file object>`',
# Matplotlib
'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
'color': ':doc:`color <matplotlib:api/colors_api>`',
'collection': ':doc:`collections <matplotlib:api/collections_api>`',
'Axes': 'matplotlib.axes.Axes',
'Figure': 'matplotlib.figure.Figure',
'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
# Mayavi
'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
'mlab.Figure': 'mayavi.core.api.Scene',
# sklearn
'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
# joblib
'joblib.Parallel': 'joblib.Parallel',
# nibabel
'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
# MNE
'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
'AverageTFR': 'mne.time_frequency.AverageTFR',
'EpochsTFR': 'mne.time_frequency.EpochsTFR',
'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
'Montage': 'mne.channels.Montage',
'DigMontage': 'mne.channels.DigMontage',
'VectorSourceEstimate': 'mne.VectorSourceEstimate',
'VolSourceEstimate': 'mne.VolSourceEstimate',
'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
'MixedSourceEstimate': 'mne.MixedSourceEstimate',
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
'ConductorModel': 'mne.bem.ConductorModel',
'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
'InverseOperator': 'mne.minimum_norm.InverseOperator',
'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
'SourceMorph': 'mne.SourceMorph',
'Xdawn': 'mne.preprocessing.Xdawn',
    'Report': 'mne.Report',
'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
'Vectorizer': 'mne.decoding.Vectorizer',
'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
'TemporalFilter': 'mne.decoding.TemporalFilter',
'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
'PSDEstimator': 'mne.decoding.PSDEstimator',
'LinearModel': 'mne.decoding.LinearModel',
'FilterEstimator': 'mne.decoding.FilterEstimator',
'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
'Beamformer': 'mne.beamformer.Beamformer',
'Transform': 'mne.transforms.Transform',
}
numpydoc_xref_ignore = {
# words
'instance', 'instances', 'of', 'default', 'shape', 'or',
'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
'dtype', 'object', 'self.verbose',
# shapes
'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
# Undocumented (on purpose)
'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
'RawBrainVision', 'RawCurry',
# sklearn subclasses
'mapping', 'to', 'any',
# unlinkable
'mayavi.mlab.pipeline.surface',
}
| [] | [] | [
"_MNE_BUILDING_DOC",
"BUILD_DEV_HTML"
] | [] | ["_MNE_BUILDING_DOC", "BUILD_DEV_HTML"] | python | 2 | 0 | |
config/celery.py | import os
from celery import Celery
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
app = Celery("resourcing_approval")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django apps.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print(f"Request: {self.request!r}")
| [] | [] | [] | [] | [] | python | 0 | 0 | |
OnlineContests/4/sparse_arrays.py | #!/bin/python3
import math
import os
import random
import re
import sys
def matchingStrings(strings, queries):
return map(strings.count, queries)
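# Worked example (illustrative, not part of the submission): with
# strings = ["aba", "baba", "aba", "xzxb"] and queries = ["aba", "ab", "xzxb"],
# matchingStrings yields [2, 0, 1], because list.count tallies exact
# occurrences of each query within the strings list.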
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
strings_count = int(input())
strings = []
for _ in range(strings_count):
strings_item = input()
strings.append(strings_item)
queries_count = int(input())
queries = []
for _ in range(queries_count):
queries_item = input()
queries.append(queries_item)
res = matchingStrings(strings, queries)
fptr.write('\n'.join(map(str, res)))
fptr.write('\n')
fptr.close()
| [] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
sntp/test_sntp.py | # Test sntp.py using pytest with CPython
# Copyright (c) 2020 by Thorsten von Eicken
import pytest
import sntp
import time, os, socket, asyncio
pytestmark = pytest.mark.timeout(2)
UNIX_DELTA = 946684800
NTP_DELTA = 3155673600
os.environ["TZ"] = "UTC"
time.tzset()
def mktime(y, mo, d, h, m, s):
return int(time.mktime((y, mo, d, h, m, s, 0, 0, 0)))
def test_deltas():
assert UNIX_DELTA == mktime(2000, 1, 1, 0, 0, 0) - mktime(1970, 1, 1, 0, 0, 0)
assert NTP_DELTA == mktime(2000, 1, 1, 0, 0, 0) - mktime(1900, 1, 1, 0, 0, 0)
def test_round_trip():
for year in [ 2019, 2025 ]:
mp1 = (mktime(year, 2, 24, 17, 59, 10) - UNIX_DELTA) * 1000000 + 238000
ntp = sntp.mp2ntp(mp1)
mp2 = sntp.ntp2mp(*ntp)
assert abs(mp1 - mp2) < 2
def test_mp2ntp():
mp1 = 1234 * 1000000 + 500000
ntp1got = sntp.mp2ntp(mp1)
ntp1exp = (1234 + NTP_DELTA, 0x80000000)
assert ntp1got == ntp1exp
# example from http://www.ntp.org/ntpfaq/NTP-s-algo.htm #5.1.2.3
unix2 = (0x39AEA96E, 0x000B3A75)
mp2 = (unix2[0] - UNIX_DELTA) * 1000000 + unix2[1]
ntp2got = sntp.mp2ntp(mp2)
ntp2exp = (0xBD5927EE, 0xBC616000)
print("%x %x" % (ntp2got[1], ntp2exp[1]))
assert ntp2got[0] == ntp2exp[0]
assert abs(ntp2got[1] - ntp2exp[1]) < (2 ** 32) / 1000000
def test_ntp2mp():
ntp1 = (NTP_DELTA, 0x80000000)
mp1got = sntp.ntp2mp(*ntp1)
mp1exp = 500000
assert mp1got == mp1exp
# example from http://www.ntp.org/ntpfaq/NTP-s-algo.htm #5.1.2.3
ntp2 = (0xBD5927EE, 0xBC616000)
mp2got = sntp.ntp2mp(*ntp2)
unix2got = divmod(mp2got, 1000000)
unix2got = (unix2got[0] + UNIX_DELTA, unix2got[1])
unix2exp = (0x39AEA96E, 0x000B3A75)
print("%x %x" % (unix2got[1], unix2exp[1]))
assert unix2got[0] == unix2exp[0]
assert abs(unix2got[1] - unix2exp[1]) < 2
@pytest.mark.asyncio
async def test_poll():
sntpcli = sntp.SNTP()
delay, step = await sntpcli._poll()
assert delay < 200*1000
assert abs(step) < 100*1000
@pytest.mark.asyncio
async def test_poll_dns_fail():
sntpcli = sntp.SNTP(host="nonexistant.example.com")
with pytest.raises(socket.gaierror):
delay, step = await sntpcli._poll()
@pytest.mark.asyncio
async def test_poll_timeout():
sntpcli = sntp.SNTP(host="1.1.1.1")
with pytest.raises(asyncio.TimeoutError):
delay, step = await sntpcli._poll()
@pytest.mark.asyncio
async def test_start_stop():
sntpcli = sntp.SNTP()
sntpcli.start()
await asyncio.sleep(1)
await sntpcli.stop()
| [] | [] | [
"TZ"
] | [] | ["TZ"] | python | 1 | 0 | |
large_objects_test.go | package pgx_test
import (
"context"
"io"
"os"
"testing"
"time"
"github.com/jackc/pgconn"
"github.com/khaibin/pgx/v4"
)
func TestLargeObjects(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
t.Fatal(err)
}
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
}
testLargeObjects(t, ctx, tx)
}
func TestLargeObjectsPreferSimpleProtocol(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
t.Fatal(err)
}
config.PreferSimpleProtocol = true
conn, err := pgx.ConnectConfig(ctx, config)
if err != nil {
t.Fatal(err)
}
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
}
testLargeObjects(t, ctx, tx)
}
func testLargeObjects(t *testing.T, ctx context.Context, tx pgx.Tx) {
lo := tx.LargeObjects()
id, err := lo.Create(ctx, 0)
if err != nil {
t.Fatal(err)
}
obj, err := lo.Open(ctx, id, pgx.LargeObjectModeRead|pgx.LargeObjectModeWrite)
if err != nil {
t.Fatal(err)
}
n, err := obj.Write([]byte("testing"))
if err != nil {
t.Fatal(err)
}
if n != 7 {
t.Errorf("Expected n to be 7, got %d", n)
}
pos, err := obj.Seek(1, 0)
if err != nil {
t.Fatal(err)
}
if pos != 1 {
t.Errorf("Expected pos to be 1, got %d", pos)
}
res := make([]byte, 6)
n, err = obj.Read(res)
if err != nil {
t.Fatal(err)
}
if string(res) != "esting" {
t.Errorf(`Expected res to be "esting", got %q`, res)
}
if n != 6 {
t.Errorf("Expected n to be 6, got %d", n)
}
n, err = obj.Read(res)
if err != io.EOF {
t.Error("Expected io.EOF, go nil")
}
if n != 0 {
t.Errorf("Expected n to be 0, got %d", n)
}
pos, err = obj.Tell()
if err != nil {
t.Fatal(err)
}
if pos != 7 {
t.Errorf("Expected pos to be 7, got %d", pos)
}
err = obj.Truncate(1)
if err != nil {
t.Fatal(err)
}
pos, err = obj.Seek(-1, 2)
if err != nil {
t.Fatal(err)
}
if pos != 0 {
t.Errorf("Expected pos to be 0, got %d", pos)
}
res = make([]byte, 2)
n, err = obj.Read(res)
if err != io.EOF {
t.Errorf("Expected err to be io.EOF, got %v", err)
}
if n != 1 {
t.Errorf("Expected n to be 1, got %d", n)
}
if res[0] != 't' {
t.Errorf("Expected res[0] to be 't', got %v", res[0])
}
err = obj.Close()
if err != nil {
t.Fatal(err)
}
err = lo.Unlink(ctx, id)
if err != nil {
t.Fatal(err)
}
_, err = lo.Open(ctx, id, pgx.LargeObjectModeRead)
if e, ok := err.(*pgconn.PgError); !ok || e.Code != "42704" {
t.Errorf("Expected undefined_object error (42704), got %#v", err)
}
}
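// Minimal usage sketch of the large-objects API exercised above (assumes a
// reachable database named by PGX_TEST_DATABASE; large objects must be used
// inside a transaction):
//
//	tx, _ := conn.Begin(ctx)
//	lo := tx.LargeObjects()
//	id, _ := lo.Create(ctx, 0)
//	obj, _ := lo.Open(ctx, id, pgx.LargeObjectModeWrite)
//	obj.Write([]byte("hello"))
//	obj.Close()
//	tx.Commit(ctx)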
func TestLargeObjectsMultipleTransactions(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
conn, err := pgx.Connect(ctx, os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
t.Fatal(err)
}
tx, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
}
lo := tx.LargeObjects()
id, err := lo.Create(ctx, 0)
if err != nil {
t.Fatal(err)
}
obj, err := lo.Open(ctx, id, pgx.LargeObjectModeWrite)
if err != nil {
t.Fatal(err)
}
n, err := obj.Write([]byte("testing"))
if err != nil {
t.Fatal(err)
}
if n != 7 {
t.Errorf("Expected n to be 7, got %d", n)
}
// Commit the first transaction
err = tx.Commit(ctx)
if err != nil {
t.Fatal(err)
}
// IMPORTANT: Use the same connection for another query
query := `select n from generate_series(1,10) n`
rows, err := conn.Query(ctx, query)
if err != nil {
t.Fatal(err)
}
rows.Close()
// Start a new transaction
tx2, err := conn.Begin(ctx)
if err != nil {
t.Fatal(err)
}
lo2 := tx2.LargeObjects()
// Reopen the large object in the new transaction
obj2, err := lo2.Open(ctx, id, pgx.LargeObjectModeRead|pgx.LargeObjectModeWrite)
if err != nil {
t.Fatal(err)
}
pos, err := obj2.Seek(1, 0)
if err != nil {
t.Fatal(err)
}
if pos != 1 {
t.Errorf("Expected pos to be 1, got %d", pos)
}
res := make([]byte, 6)
n, err = obj2.Read(res)
if err != nil {
t.Fatal(err)
}
if string(res) != "esting" {
t.Errorf(`Expected res to be "esting", got %q`, res)
}
if n != 6 {
t.Errorf("Expected n to be 6, got %d", n)
}
n, err = obj2.Read(res)
if err != io.EOF {
t.Error("Expected io.EOF, go nil")
}
if n != 0 {
t.Errorf("Expected n to be 0, got %d", n)
}
pos, err = obj2.Tell()
if err != nil {
t.Fatal(err)
}
if pos != 7 {
t.Errorf("Expected pos to be 7, got %d", pos)
}
err = obj2.Truncate(1)
if err != nil {
t.Fatal(err)
}
pos, err = obj2.Seek(-1, 2)
if err != nil {
t.Fatal(err)
}
if pos != 0 {
t.Errorf("Expected pos to be 0, got %d", pos)
}
res = make([]byte, 2)
n, err = obj2.Read(res)
if err != io.EOF {
t.Errorf("Expected err to be io.EOF, got %v", err)
}
if n != 1 {
t.Errorf("Expected n to be 1, got %d", n)
}
if res[0] != 't' {
t.Errorf("Expected res[0] to be 't', got %v", res[0])
}
err = obj2.Close()
if err != nil {
t.Fatal(err)
}
err = lo2.Unlink(ctx, id)
if err != nil {
t.Fatal(err)
}
_, err = lo2.Open(ctx, id, pgx.LargeObjectModeRead)
if e, ok := err.(*pgconn.PgError); !ok || e.Code != "42704" {
t.Errorf("Expected undefined_object error (42704), got %#v", err)
}
}
| [
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\"",
"\"PGX_TEST_DATABASE\""
] | [] | [
"PGX_TEST_DATABASE"
] | [] | ["PGX_TEST_DATABASE"] | go | 1 | 0 | |
pubsub/rabbitpubsub/rabbit_test.go | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rabbitpubsub
// To run these tests against a real RabbitMQ server, first run:
// docker run -d --hostname my-rabbit --name rabbit -p 5672:5672 rabbitmq:3
// Then wait a few seconds for the server to be ready.
// If no server is running, the tests will use a fake (see fake_test.go).
import (
"context"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/streadway/amqp"
"gocloud.dev/gcerrors"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"gocloud.dev/pubsub/drivertest"
)
const rabbitURL = "amqp://guest:guest@localhost:5672/"
var logOnce sync.Once
func mustDialRabbit(t testing.TB) amqpConnection {
conn, err := amqp.Dial(rabbitURL)
if err != nil {
logOnce.Do(func() {
t.Logf("using the fake because the RabbitMQ server is not up (dial error: %v)", err)
})
return newFakeConnection()
}
logOnce.Do(func() {
t.Logf("using the RabbitMQ server at %s", rabbitURL)
})
return &connection{conn}
}
func TestConformance(t *testing.T) {
harnessMaker := func(_ context.Context, t *testing.T) (drivertest.Harness, error) {
return &harness{conn: mustDialRabbit(t)}, nil
}
_, isFake := mustDialRabbit(t).(*fakeConnection)
asTests := []drivertest.AsTest{rabbitAsTest{isFake}}
drivertest.RunConformanceTests(t, harnessMaker, asTests)
// Run the conformance tests with the fake if we haven't.
if isFake {
return
}
t.Logf("now running tests with the fake")
harnessMaker = func(_ context.Context, t *testing.T) (drivertest.Harness, error) {
return &harness{conn: newFakeConnection()}, nil
}
asTests = []drivertest.AsTest{rabbitAsTest{true}}
drivertest.RunConformanceTests(t, harnessMaker, asTests)
}
func BenchmarkRabbit(b *testing.B) {
ctx := context.Background()
h := &harness{conn: mustDialRabbit(b)}
dt, cleanup, err := h.CreateTopic(ctx, b.Name())
if err != nil {
b.Fatal(err)
}
defer cleanup()
ds, cleanup, err := h.CreateSubscription(ctx, dt, b.Name())
if err != nil {
b.Fatal(err)
}
defer cleanup()
topic := pubsub.NewTopic(dt, nil)
defer topic.Shutdown(ctx)
sub := pubsub.NewSubscription(ds, nil, nil)
defer sub.Shutdown(ctx)
drivertest.RunBenchmarks(b, topic, sub)
}
type harness struct {
conn amqpConnection
numTopics uint32
numSubs uint32
}
func (h *harness) CreateTopic(_ context.Context, testName string) (dt driver.Topic, cleanup func(), err error) {
exchange := fmt.Sprintf("%s-topic-%d", testName, atomic.AddUint32(&h.numTopics, 1))
if err := declareExchange(h.conn, exchange); err != nil {
return nil, nil, err
}
cleanup = func() {
ch, err := h.conn.Channel()
if err != nil {
panic(err)
}
ch.ExchangeDelete(exchange)
}
return newTopic(h.conn, exchange), cleanup, nil
}
func (h *harness) MakeNonexistentTopic(context.Context) (driver.Topic, error) {
return newTopic(h.conn, "nonexistent-topic"), nil
}
func (h *harness) CreateSubscription(_ context.Context, dt driver.Topic, testName string) (ds driver.Subscription, cleanup func(), err error) {
queue := fmt.Sprintf("%s-subscription-%d", testName, atomic.AddUint32(&h.numSubs, 1))
if err := bindQueue(h.conn, queue, dt.(*topic).exchange); err != nil {
return nil, nil, err
}
cleanup = func() {
ch, err := h.conn.Channel()
if err != nil {
panic(err)
}
ch.QueueDelete(queue)
}
ds = newSubscription(h.conn, queue)
return ds, cleanup, nil
}
func (h *harness) MakeNonexistentSubscription(_ context.Context) (driver.Subscription, error) {
return newSubscription(h.conn, "nonexistent-subscription"), nil
}
func (h *harness) Close() {
h.conn.Close()
}
func (h *harness) MaxBatchSizes() (int, int) { return 0, 0 }
// This test is important for the RabbitMQ driver because the underlying client is
// poorly designed with respect to concurrency, so we must make sure to exercise the
// driver with concurrent calls.
//
// We can't make this a conformance test at this time because there is no way
// to set the batcher's maxHandlers parameter to anything other than 1.
func TestPublishConcurrently(t *testing.T) {
// See if we can call SendBatch concurrently without deadlock or races.
ctx := context.Background()
conn := mustDialRabbit(t)
defer conn.Close()
if err := declareExchange(conn, "t"); err != nil {
t.Fatal(err)
}
// The queue is needed, or RabbitMQ says the message is unroutable.
if err := bindQueue(conn, "s", "t"); err != nil {
t.Fatal(err)
}
topic := newTopic(conn, "t")
errc := make(chan error, 100)
for g := 0; g < cap(errc); g++ {
g := g
go func() {
var msgs []*driver.Message
for i := 0; i < 10; i++ {
msgs = append(msgs, &driver.Message{
Metadata: map[string]string{"a": strconv.Itoa(i)},
Body: []byte(fmt.Sprintf("msg-%d-%d", g, i)),
})
}
errc <- topic.SendBatch(ctx, msgs)
}()
}
for i := 0; i < cap(errc); i++ {
if err := <-errc; err != nil {
t.Fatal(err)
}
}
}
func TestUnroutable(t *testing.T) {
// Expect that we get an error on publish if the exchange has no queue bound to it.
// The error should be a MultiError containing one error per message.
ctx := context.Background()
conn := mustDialRabbit(t)
defer conn.Close()
if err := declareExchange(conn, "u"); err != nil {
t.Fatal(err)
}
topic := newTopic(conn, "u")
msgs := []*driver.Message{
{Body: []byte("")},
{Body: []byte("")},
}
err := topic.SendBatch(ctx, msgs)
merr, ok := err.(MultiError)
if !ok {
t.Fatalf("got error of type %T, want MultiError", err)
}
if got, want := len(merr), len(msgs); got != want {
t.Fatalf("got %d errors, want %d", got, want)
}
// Test MultiError.Error.
if got, want := strings.Count(merr.Error(), ";")+1, len(merr); got != want {
t.Errorf("got %d semicolon-separated messages, want %d", got, want)
}
// Test each individual error.
for i, err := range merr {
if !strings.Contains(err.Error(), "NO_ROUTE") {
t.Errorf("%d: got %v, want an error with 'NO_ROUTE'", i, err)
}
}
}
func TestErrorCode(t *testing.T) {
for _, test := range []struct {
in error
want gcerrors.ErrorCode
}{
{nil, gcerrors.Unknown},
{&os.PathError{}, gcerrors.Unknown},
{&amqp.Error{Code: amqp.SyntaxError}, gcerrors.Internal},
{&amqp.Error{Code: amqp.NotImplemented}, gcerrors.Unimplemented},
{&amqp.Error{Code: amqp.ContentTooLarge}, gcerrors.Unknown},
} {
if got := errorCode(test.in); got != test.want {
t.Errorf("%v: got %s, want %s", test.in, got, test.want)
}
}
}
func TestOpens(t *testing.T) {
ctx := context.Background()
if got := OpenTopic(nil, "t", nil); got == nil {
t.Error("got nil, want non-nil")
} else {
got.Shutdown(ctx)
}
if got := OpenSubscription(nil, "s", nil); got == nil {
t.Error("got nil, want non-nil")
} else {
got.Shutdown(ctx)
}
}
func TestCancelSendAndReceive(t *testing.T) {
conn := mustDialRabbit(t)
defer conn.Close()
if err := declareExchange(conn, "t"); err != nil {
t.Fatal(err)
}
// The queue is needed, or RabbitMQ says the message is unroutable.
if err := bindQueue(conn, "s", "t"); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
topic := newTopic(conn, "t")
topic.sendBatchHook = cancel
msgs := []*driver.Message{
{Body: []byte("")},
}
var err error
for err == nil {
err = topic.SendBatch(ctx, msgs)
}
ec := errorCodeForTest(err)
// Error might either be from context being canceled, or channel subsequently being closed.
if ec != gcerrors.Canceled && ec != gcerrors.FailedPrecondition {
t.Errorf("got %v, want context.Canceled or FailedPrecondition", err)
}
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
if err := topic.SendBatch(ctx, msgs); err != nil {
t.Fatal(err)
}
sub := newSubscription(conn, "s")
sub.receiveBatchHook = cancel
_, err = sub.ReceiveBatch(ctx, 4)
if err != context.Canceled {
t.Errorf("got %v, want context.Canceled", err)
}
}
// Includes some cases that are handled elsewhere in production code.
func errorCodeForTest(err error) gcerrors.ErrorCode {
switch err {
case nil:
return gcerrors.OK
case context.Canceled:
return gcerrors.Canceled
case context.DeadlineExceeded:
return gcerrors.DeadlineExceeded
default:
return errorCode(err)
}
}
func TestIsRetryable(t *testing.T) {
for _, test := range []struct {
err error
want bool
}{
{errors.New("xyz"), false},
{io.ErrUnexpectedEOF, false},
{&amqp.Error{Code: amqp.AccessRefused}, false},
{&amqp.Error{Code: amqp.ContentTooLarge}, true},
{&amqp.Error{Code: amqp.ConnectionForced}, true},
} {
got := isRetryable(test.err)
if got != test.want {
t.Errorf("%+v: got %t, want %t", test.err, got, test.want)
}
}
}
func TestRunWithContext(t *testing.T) {
// runWithContext will run its argument to completion if the context isn't done.
e := errors.New("")
// f sleeps for a bit just to give the scheduler a chance to run.
f := func() error { time.Sleep(100 * time.Millisecond); return e }
got := runWithContext(context.Background(), f)
if want := e; got != want {
t.Errorf("got %v, want %v", got, want)
}
// runWithContext will return ctx.Err if context is done.
ctx, cancel := context.WithCancel(context.Background())
cancel()
got = runWithContext(ctx, f)
if want := context.Canceled; got != want {
t.Errorf("got %v, want %v", got, want)
}
}
func declareExchange(conn amqpConnection, name string) error {
ch, err := conn.Channel()
if err != nil {
panic(err)
}
defer ch.Close()
return ch.ExchangeDeclare(name)
}
func bindQueue(conn amqpConnection, queueName, exchangeName string) error {
ch, err := conn.Channel()
if err != nil {
return err
}
defer ch.Close()
return ch.QueueDeclareAndBind(queueName, exchangeName)
}
type rabbitAsTest struct {
usingFake bool
}
func (rabbitAsTest) Name() string {
return "rabbit test"
}
func (r rabbitAsTest) TopicCheck(topic *pubsub.Topic) error {
var conn2 amqp.Connection
if topic.As(&conn2) {
return fmt.Errorf("cast succeeded for %T, want failure", &conn2)
}
if !r.usingFake {
var conn3 *amqp.Connection
if !topic.As(&conn3) {
return fmt.Errorf("cast failed for %T", &conn3)
}
}
return nil
}
func (r rabbitAsTest) SubscriptionCheck(sub *pubsub.Subscription) error {
var conn2 amqp.Connection
if sub.As(&conn2) {
return fmt.Errorf("cast succeeded for %T, want failure", &conn2)
}
if !r.usingFake {
var conn3 *amqp.Connection
if !sub.As(&conn3) {
return fmt.Errorf("cast failed for %T", &conn3)
}
}
return nil
}
func (rabbitAsTest) TopicErrorCheck(t *pubsub.Topic, err error) error {
var aerr *amqp.Error
if !t.ErrorAs(err, &aerr) {
return fmt.Errorf("failed to convert %v (%T) to an amqp.Error", err, err)
}
if aerr.Code != amqp.NotFound {
return fmt.Errorf("got code %v, want NotFound", aerr.Code)
}
err = MultiError{err}
var merr MultiError
if !t.ErrorAs(err, &merr) {
return fmt.Errorf("failed to convert %v (%T) to a MultiError", err, err)
}
var perr *os.PathError
if t.ErrorAs(err, &perr) {
return errors.New("got true for PathError, want false")
}
return nil
}
func (rabbitAsTest) SubscriptionErrorCheck(s *pubsub.Subscription, err error) error {
var aerr *amqp.Error
if !s.ErrorAs(err, &aerr) {
return fmt.Errorf("failed to convert %v (%T) to an amqp.Error", err, err)
}
if aerr.Code != amqp.NotFound {
return fmt.Errorf("got code %v, want NotFound", aerr.Code)
}
err = MultiError{err}
var merr MultiError
if !s.ErrorAs(err, &merr) {
return fmt.Errorf("failed to convert %v (%T) to a MultiError", err, err)
}
var perr *os.PathError
if s.ErrorAs(err, &perr) {
return errors.New("got true for PathError, want false")
}
return nil
}
func (r rabbitAsTest) MessageCheck(m *pubsub.Message) error {
var pd *amqp.Delivery
if m.As(&pd) {
return fmt.Errorf("cast succeeded for %T, want failure", &pd)
}
if !r.usingFake {
var d amqp.Delivery
if !m.As(&d) {
return fmt.Errorf("cast failed for %T", &d)
}
}
return nil
}
func fakeConnectionStringInEnv() func() {
oldEnvVal := os.Getenv("RABBIT_SERVER_URL")
os.Setenv("RABBIT_SERVER_URL", "amqp://localhost:10000/vhost")
return func() {
os.Setenv("RABBIT_SERVER_URL", oldEnvVal)
}
}
func TestOpenTopicFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK, but still error because Dial fails.
{"rabbit://myexchange", true},
// Invalid parameter.
{"rabbit://myexchange?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
topic, err := pubsub.OpenTopic(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
if topic != nil {
topic.Shutdown(ctx)
}
}
}
func TestOpenSubscriptionFromURL(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK, but error because Dial fails.
{"rabbit://myqueue", true},
// Invalid parameter.
{"rabbit://myqueue?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
sub, err := pubsub.OpenSubscription(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
if sub != nil {
sub.Shutdown(ctx)
}
}
}
| [
"\"RABBIT_SERVER_URL\""
] | [] | [
"RABBIT_SERVER_URL"
] | [] | ["RABBIT_SERVER_URL"] | go | 1 | 0 | |
pkg/sentry/fs/proc/device/device_state_autogen.go | // automatically generated by stateify.
package device
| [] | [] | [] | [] | [] | go | null | null | null |
ImagesDBApp/wsgi.py | """
WSGI config for ImagesDB project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ImagesDB.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
Ska/engarchive/fetch.py | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Fetch values from the Ska engineering telemetry archive.
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import time
import contextlib
import logging
import operator
import fnmatch
import collections
import warnings
import re
import numpy as np
from astropy.io import ascii
import pyyaks.context
import six
from six.moves import cPickle as pickle
from six.moves import zip
from . import file_defs
from .units import Units
from . import cache
from . import remote_access
from .version import __version__, __git_version__
from Chandra.Time import DateTime
# Module-level units, defaults to CXC units (e.g. Kelvins etc)
UNITS = Units(system='cxc')
# Module-level control of whether MSID.fetch will cache the last 30 results
CACHE = False
SKA = os.getenv('SKA') or '/proj/sot/ska'
ENG_ARCHIVE = os.getenv('ENG_ARCHIVE') or SKA + '/data/eng_archive'
IGNORE_COLNAMES = ('TIME', 'MJF', 'MNF', 'TLM_FMT')
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
# Dates near the start of 2000 that demarcate the split between the 1999 data
# and post-2000 data. The 1999 data goes out to at least 2000:005:13:00:00,
# while post-2000 data starts as late as 2000:001:11:58:59. Dates between LO
# and HI get taken from either 1999 or post-2000. The times are 4 millisec before
# a minor frame boundary to avoid collisions.
DATE2000_LO = DateTime('2000:001:00:00:00.090').date
DATE2000_HI = DateTime('2000:003:00:00:00.234').date
# Launch date (earliest possible date for telemetry)
LAUNCH_DATE = '1999:204'
# Maximum number of MSIDs that should ever match an input MSID spec
# (to prevent accidentally selecting a very large number of MSIDs)
MAX_GLOB_MATCHES = 10
# Special-case state codes that override those in the TDB
STATE_CODES = {
# SIMDIAG
'3SDSWELF': [(0, 'F'), (1, 'T')],
'3SDSYRS': [(0, 'F'), (1, 'T')],
'3SDWMRS': [(0, 'F'), (1, 'T')],
# SIM_MRG
'3TSCMOVE': [(0, 'F'), (1, 'T')],
'3FAMOVE': [(0, 'F'), (1, 'T')],
'3SEAID': [(0, 'SEA-A'), (1, 'SEA-B')],
'3SEARSET': [(0, 'F'), (1, 'T')],
'3SEAROMF': [(0, 'F'), (1, 'T')],
'3SEAINCM': [(0, 'F'), (1, 'T')],
'3STAB2EN': [(0, 'DISABLE'), (1, 'ENABLE')],
'3SMOTPEN': [(0, 'ENABLE'), (1, 'DISABLE')],
'3SMOTSEL': [(0, 'TSC'), (1, 'FA')],
'3SHTREN': [(0, 'DISABLE'), (1, 'ENABLE')],
'3SEARAMF': [(0, 'F'), (1, 'T')],
}
# Cached version (by content type) of first and last available times in archive
CONTENT_TIME_RANGES = {}
# Default source of data.
DEFAULT_DATA_SOURCE = 'cxc'
class _DataSource(object):
"""
Context manager and quasi-singleton configuration object for managing the
data_source(s) used for fetching telemetry.
"""
_data_sources = (DEFAULT_DATA_SOURCE,)
_allowed = ('cxc', 'maude', 'test-drop-half')
def __init__(self, *data_sources):
self._new_data_sources = data_sources
def __enter__(self):
self._orig_data_sources = self.__class__._data_sources
self.set(*self._new_data_sources)
def __exit__(self, type, value, traceback):
self.__class__._data_sources = self._orig_data_sources
@classmethod
def set(cls, *data_sources):
"""
Set current data sources.
:param *data_sources: one or more sources (str)
"""
if any(data_source.split()[0] not in cls._allowed for data_source in data_sources):
raise ValueError('data_sources {} not in allowed set {}'
.format(data_sources, cls._allowed))
if len(data_sources) == 0:
raise ValueError('must select at least one data source in {}'
.format(cls._allowed))
cls._data_sources = data_sources
@classmethod
def sources(cls, include_test=True):
"""
Get tuple of current data sources names.
:param include_test: include sources that start with 'test'
:returns: tuple of data source names
"""
if include_test:
sources = cls._data_sources
else:
sources = [x for x in cls._data_sources if not x.startswith('test')]
return tuple(source.split()[0] for source in sources)
@classmethod
def get_msids(cls, source):
"""
Get the set of MSID names corresponding to ``source`` (e.g. 'cxc' or 'maude')
:param source: str
:returns: set of MSIDs
"""
source = source.split()[0]
if source == 'cxc':
out = list(content.keys())
elif source == 'maude':
import maude
out = list(maude.MSIDS.keys())
else:
            raise ValueError('source must be "cxc" or "maude"')
return set(out)
@classmethod
def options(cls):
"""
Get the data sources and corresponding options as a dict.
Example::
>>> data_source.set('cxc', 'maude allow_subset=False')
>>> data_source.options()
{'cxc': {}, 'maude': {'allow_subset': False}}
:returns: dict of data source options
"""
import ast
out = {}
for source in cls._data_sources:
vals = source.split()
name, opts = vals[0], vals[1:]
out[name] = {}
for opt in opts:
key, val = opt.split('=')
val = ast.literal_eval(val)
out[name][key] = val
return out
# Public interface is a "data_source" module attribute
data_source = _DataSource
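# Hedged usage sketch (MSID name and dates are placeholders): the context
# manager form temporarily switches the source used by fetch calls, e.g.
#
#     with data_source('maude'):
#         dat = MSID('tephin', '2020:001', '2020:002')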
def local_or_remote_function(remote_print_output):
"""
Decorator maker so that a function gets run either locally or remotely
depending on the state of remote_access.access_remotely. This decorator
maker takes an optional remote_print_output argument that will be
be printed (locally) if the function is executed remotely,
For functions that are decorated using this wrapper:
Every path that may be generated locally but used remotely should be
split with _split_path(). Conversely the functions that use
the resultant path should re-join them with os.path.join. In the
remote case the join will happen using the remote rules.
"""
def the_decorator(func):
def wrapper(*args, **kwargs):
if remote_access.access_remotely:
# If accessing a remote archive, establish the connection (if
# necessary)
if not remote_access.connection_is_established():
try:
if not remote_access.establish_connection():
raise remote_access.RemoteConnectionError(
"Unable to establish connection for remote fetch.")
except EOFError:
# An EOF error can be raised if the python interpreter is being
# called in such a way that input cannot be received from the
# user (e.g. when the python interpreter is called from MATLAB)
# If that is the case (and remote access is enabled), then
# raise an import error
raise ImportError("Unable to interactively get remote access "
"info from user.")
# Print the output, if specified
                if remote_access.show_print_output and remote_print_output is not None:
print(remote_print_output)
sys.stdout.flush()
# Execute the function remotely and return the result
return remote_access.execute_remotely(func, *args, **kwargs)
else:
return func(*args, **kwargs)
return wrapper
return the_decorator
def _split_path(path):
"""
Return a tuple of the components for ``path``. Strip off the drive if
it exists. This works correctly for the local OS (linux / windows).
"""
drive, path = os.path.splitdrive(path)
folders = []
while True:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path == "\\":
folders.append("/")
elif path != "":
folders.append(path)
break
folders.reverse()
return folders
def _get_start_stop_dates(times):
if len(times) == 0:
return {}
else:
return {'start': DateTime(times[0]).date,
'stop': DateTime(times[-1]).date}
# Context dictionary to provide context for msid_files
ft = pyyaks.context.ContextDict('ft')
# Global (eng_archive) definition of file names
msid_files = pyyaks.context.ContextDict('msid_files', basedir=ENG_ARCHIVE)
msid_files.update(file_defs.msid_files)
# Module-level values defining available content types and column (MSID) names.
# Then convert from astropy Table to recarray for API stability.
# Note that filetypes.as_array().view(np.recarray) does not quite work...
filetypes = ascii.read(os.path.join(DIR_PATH, 'filetypes.dat'))
filetypes_arr = filetypes.as_array()
filetypes = np.recarray(len(filetypes_arr), dtype=filetypes_arr.dtype)
filetypes[()] = filetypes_arr
content = collections.OrderedDict()
# Get the list of filenames (an array is built to pass all the filenames at
# once to the remote machine since passing them one at a time is rather slow)
all_msid_names_files = dict()
for filetype in filetypes:
ft['content'] = filetype['content'].lower()
all_msid_names_files[str(ft['content'])] = \
_split_path(msid_files['colnames'].abs)
# Function to load MSID names from the files (executed remotely, if necessary)
@local_or_remote_function("Loading MSID names from Ska eng archive server...")
def load_msid_names(all_msid_names_files):
from six.moves import cPickle as pickle
all_colnames = dict()
for k, msid_names_file in six.iteritems(all_msid_names_files):
try:
all_colnames[k] = pickle.load(open(os.path.join(*msid_names_file), 'rb'))
except IOError:
pass
return all_colnames
# Load the MSID names
all_colnames = load_msid_names(all_msid_names_files)
# Save the names
for k, colnames in six.iteritems(all_colnames):
content.update((x, k) for x in sorted(colnames)
if x not in IGNORE_COLNAMES)
# Cache of the most-recently used TIME array and associated bad values mask.
# The key is (content_type, tstart, tstop).
times_cache = dict(key=None)
# Set up logging.
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('Ska.engarchive.fetch')
logger.addHandler(NullHandler())
logger.propagate = False
# Warn the user if ENG_ARCHIVE is set such that the data path is non-standard
if os.getenv('ENG_ARCHIVE'):
print('fetch: using ENG_ARCHIVE={} for archive path'
.format(os.getenv('ENG_ARCHIVE')))
def get_units():
"""Get the unit system currently being used for conversions.
"""
return UNITS['system']
def set_units(unit_system):
"""Set the unit system used for output telemetry values. The default
is "cxc". Allowed values for ``unit_system`` are:
==== ==============================================================
cxc FITS standard units used in CXC archive files (basically MKS)
sci Same as "cxc" but with temperatures in degC instead of Kelvins
eng OCC engineering units (TDB P009, e.g. degF, ft-lb-sec, PSI)
==== ==============================================================
:param unit_system: system of units (cxc, sci, eng)
"""
UNITS.set_units(unit_system)
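# For example, set_units('eng') makes subsequent fetches return OCC engineering
# units (degF, PSI, ...), per the table in the docstring above.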
def read_bad_times(table):
"""Include a list of bad times from ``table`` in the fetch module
``bad_times`` registry. This routine can be called multiple times with
different tables and the bad times will be appended to the registry. The
table can include any number of bad time interval specifications, one per
line. A bad time interval line has three columns separated by whitespace,
e.g.::
aogbias1 2008:292:00:00:00 2008:297:00:00:00
The MSID name is not case sensitive and the time values can be in any
``DateTime`` format. Blank lines and any line starting with the #
character are ignored.
"""
bad_times = ascii.read(table, format='no_header',
names=['msid', 'start', 'stop'])
for msid, start, stop in bad_times:
msid_bad_times.setdefault(msid.upper(), []).append((start, stop))
# Set up bad times dict
msid_bad_times = dict()
read_bad_times(os.path.join(DIR_PATH, 'msid_bad_times.dat'))
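# Additional intervals can be registered from user code in the same format,
# e.g. (illustrative values taken from the docstring above):
#
#     read_bad_times(['aogbias1 2008:292:00:00:00 2008:297:00:00:00'])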
def msid_glob(msid):
"""Get the archive MSIDs matching ``msid``.
The function returns a tuple of (msids, MSIDs) where ``msids`` is a list of
MSIDs that is all lower case and (where possible) matches the input
``msid``. The output ``MSIDs`` is all upper case and corresponds to the
exact MSID names stored in the archive HDF5 files.
:param msid: input MSID glob
:returns: tuple (msids, MSIDs)
"""
msids = collections.OrderedDict()
MSIDS = collections.OrderedDict()
sources = data_source.sources(include_test=False)
for source in sources:
ms, MS = _msid_glob(msid, source)
msids.update((m, None) for m in ms)
MSIDS.update((m, None) for m in MS)
if not msids:
raise ValueError('MSID {!r} is not in {} data source(s)'
.format(msid, ' or '.join(x.upper() for x in sources)))
return list(msids), list(MSIDS)
def _msid_glob(msid, source):
"""Get the archive MSIDs matching ``msid``.
The function returns a tuple of (msids, MSIDs) where ``msids`` is a list of
MSIDs that is all lower case and (where possible) matches the input
``msid``. The output ``MSIDs`` is all upper case and corresponds to the
exact MSID names stored in the archive HDF5 files.
:param msid: input MSID glob
:returns: tuple (msids, MSIDs)
"""
source_msids = data_source.get_msids(source)
MSID = msid.upper()
# First try MSID or DP_<MSID>. If success then return the upper
# case version and whatever the user supplied (could be any case).
for match in (MSID, 'DP_' + MSID):
if match in source_msids:
return [msid], [match]
# Next try as a file glob. If there is a match then return a
# list of matches, all lower case and all upper case. Since the
# input was a glob the returned msids are just lower case versions
# of the matched upper case MSIDs.
for match in (MSID, 'DP_' + MSID):
matches = fnmatch.filter(source_msids, match)
if matches:
if len(matches) > MAX_GLOB_MATCHES:
raise ValueError(
'MSID spec {} matches more than {} MSIDs. '
'Refine the spec or increase fetch.MAX_GLOB_MATCHES'
.format(msid, MAX_GLOB_MATCHES))
return [x.lower() for x in matches], matches
# msid not found for this data source
return [], []
def _get_table_intervals_as_list(table, check_overlaps=True):
"""
Determine if the input ``table`` looks like a table of intervals. This can either be
a structured array / Table with datestart / datestop or tstart / tstop columns,
OR a list of lists.
If so, return a list of corresponding start/stop tuples, otherwise return None.
If ``check_overlaps`` is True then a check is made to assure that the supplied
intervals do not overlap. This is needed when reading multiple intervals with
a single call to fetch, but not for bad times filtering.
"""
intervals = None
if isinstance(table, (list, tuple)):
try:
intervals = [(DateTime(row[0]).secs, DateTime(row[1]).secs)
for row in table]
except:
pass
else:
for prefix in ('date', 't'):
start = prefix + 'start'
stop = prefix + 'stop'
try:
intervals = [(DateTime(row[start]).secs, DateTime(row[stop]).secs)
for row in table]
except:
pass
else:
break
# Got an intervals list, now sort
if check_overlaps and intervals is not None:
intervals = sorted(intervals, key=lambda x: x[0])
# Check for overlaps
if any(i0[1] > i1[0] for i0, i1 in zip(intervals[:-1], intervals[1:])):
raise ValueError('Input intervals overlap')
return intervals
class MSID(object):
"""Fetch data from the engineering telemetry archive into an MSID object.
The input ``msid`` is case-insensitive and can include linux file "glob"
patterns, for instance ``orb*1*_x`` (ORBITEPHEM1_X) or ``*pcadmd``
(AOPCADMD). For derived parameters the initial ``DP_`` is optional, for
instance ``dpa_pow*`` (DP_DPA_POWER).
:param msid: name of MSID (case-insensitive)
:param start: start date of telemetry (Chandra.Time compatible)
:param stop: stop date of telemetry (current time if not supplied)
:param filter_bad: automatically filter out bad values
:param stat: return 5-minute or daily statistics ('5min' or 'daily')
:returns: MSID instance
"""
units = UNITS
fetch = sys.modules[__name__]
def __init__(self, msid, start=LAUNCH_DATE, stop=None, filter_bad=False, stat=None):
msids, MSIDs = msid_glob(msid)
if len(MSIDs) > 1:
raise ValueError('Multiple matches for {} in Eng Archive'
.format(msid))
else:
self.msid = msids[0]
self.MSID = MSIDs[0]
# Capture the current module units
self.units = Units(self.units['system'])
self.unit = self.units.get_msid_unit(self.MSID)
self.stat = stat
if stat:
self.dt = {'5min': 328, 'daily': 86400}[stat]
# If ``start`` is actually a table of intervals then fetch
# each interval separately and concatenate the results
intervals = _get_table_intervals_as_list(start, check_overlaps=True)
if intervals is not None:
start, stop = intervals[0][0], intervals[-1][1]
self.tstart = DateTime(start).secs
self.tstop = (DateTime(stop).secs if stop else
DateTime(time.time(), format='unix').secs)
self.datestart = DateTime(self.tstart).date
self.datestop = DateTime(self.tstop).date
self.data_source = {}
self.content = content.get(self.MSID)
if self.datestart < DATE2000_LO and self.datestop > DATE2000_HI:
intervals = [(self.datestart, DATE2000_HI),
(DATE2000_HI, self.datestop)]
# Get the times, values, bad values mask from the HDF5 files archive
if intervals is None:
self._get_data()
else:
self._get_data_over_intervals(intervals)
# If requested filter out bad values and set self.bad = None
if filter_bad:
self.filter_bad()
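# Illustrative usage sketch (MSID name and dates are arbitrary examples):
#
#   >>> dat = fetch.MSID('tephin', '2010:001', '2010:010', stat='5min')
#   >>> dat.unit       # engineering unit of TEPHIN in the current unit system
#   >>> len(dat)       # number of 5-minute samples in the interval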
def __len__(self):
return len(self.vals)
@property
def dtype(self):
return self.vals.dtype
def __repr__(self):
attrs = [self.__class__.__name__]
for name, val in (('start', self.datestart),
('stop', self.datestop),
('len', len(self)),
('dtype', self.dtype.name),
('unit', self.unit),
('stat', self.stat)):
if val is not None:
attrs.append('{}={}'.format(name, val))
return '<' + ' '.join(attrs) + '>'
def _get_data_over_intervals(self, intervals):
"""
Fetch intervals separately and concatenate the results.
"""
msids = []
for start, stop in intervals:
msids.append(self.fetch.MSID(self.msid, start, stop, filter_bad=False, stat=self.stat))
# No bad values column for stat='5min' or 'daily', but still need this attribute.
if self.stat:
self.bads = None
self.colnames = msids[0].colnames
for attr in self.colnames:
vals = np.concatenate([getattr(msid, attr) for msid in msids])
setattr(self, attr, vals)
def _get_data(self):
"""Get data from the Eng archive"""
logger.info('Getting data for %s between %s and %s',
self.msid, self.datestart, self.datestop)
# Avoid stomping on caller's filetype 'ft' values with _cache_ft()
with _cache_ft():
ft['content'] = self.content
ft['msid'] = self.MSID
with _set_msid_files_basedir(self.datestart):
if self.stat:
if 'maude' in data_source.sources():
raise ValueError('MAUDE data source does not support telemetry statistics')
ft['interval'] = self.stat
self._get_stat_data()
else:
self.colnames = ['vals', 'times', 'bads']
args = (self.content, self.tstart, self.tstop, self.MSID, self.units['system'])
if ('cxc' in data_source.sources() and
self.MSID in data_source.get_msids('cxc')):
# CACHE is normally True only when doing ingest processing. Note
# also that to support caching the get_msid_data_from_cxc_cached
# method must be static.
get_msid_data = (self._get_msid_data_from_cxc_cached if CACHE
else self._get_msid_data_from_cxc)
self.vals, self.times, self.bads = get_msid_data(*args)
self.data_source['cxc'] = _get_start_stop_dates(self.times)
if 'test-drop-half' in data_source.sources() and hasattr(self, 'vals'):
# For testing purposes drop half the data off the end. This assumes another
# data_source like 'cxc' has been selected.
idx = len(self.vals) // 2
self.vals = self.vals[:idx]
self.times = self.times[:idx]
self.bads = self.bads[:idx]
# Following assumes only one prior data source but ok for controlled testing
for source in self.data_source:
self.data_source[source] = _get_start_stop_dates(self.times)
if ('maude' in data_source.sources() and
self.MSID in data_source.get_msids('maude')):
# Update self.vals, times, bads in place. This might concatenate MAUDE
# telemetry to existing CXC values.
self._get_msid_data_from_maude(*args)
def _get_stat_data(self):
"""Do the actual work of getting stats values for an MSID from HDF5
files"""
filename = msid_files['stats'].abs
logger.info('Opening %s', filename)
@local_or_remote_function("Getting stat data for " + self.MSID +
" from Ska eng archive server...")
def get_stat_data_from_server(filename, dt, tstart, tstop):
import tables
open_file = getattr(tables, 'open_file', None) or tables.openFile
h5 = open_file(os.path.join(*filename))
table = h5.root.data
times = (table.col('index') + 0.5) * dt
row0, row1 = np.searchsorted(times, [tstart, tstop])
table_rows = table[row0:row1] # returns np.ndarray (structured array)
h5.close()
return (times[row0:row1], table_rows, row0, row1)
times, table_rows, row0, row1 = \
get_stat_data_from_server(_split_path(filename),
self.dt, self.tstart, self.tstop)
logger.info('Closed %s', filename)
self.bads = None
self.times = times
self.colnames = ['times']
for colname in table_rows.dtype.names:
# Don't like the way columns were named in the stats tables.
# Fix that here.
colname_out = _plural(colname) if colname != 'n' else 'samples'
if colname_out in ('vals', 'mins', 'maxes', 'means',
'p01s', 'p05s', 'p16s', 'p50s',
'p84s', 'p95s', 'p99s'):
vals = self.units.convert(self.MSID, table_rows[colname])
elif colname_out == 'stds':
vals = self.units.convert(self.MSID, table_rows[colname],
delta_val=True)
else:
vals = table_rows[colname]
setattr(self, colname_out, vals)
self.colnames.append(colname_out)
# Redefine the 'vals' attribute to be 'means' if it exists. This is a
# more consistent use of the 'vals' attribute and there is little use
# for the original sampled version.
if hasattr(self, 'means'):
# Create new attribute midvals and add as a column (fixes kadi#17)
self.colnames.append('midvals')
self.midvals = self.vals
self.vals = self.means
# Possibly convert vals to unicode for Python 3+. If this MSID is a
# state-valued MSID (with string value) then `vals` is the only possible
# string attribute. None of the others like mins/maxes etc will exist.
if not six.PY2:
for colname in self.colnames:
vals = getattr(self, colname)
if vals.dtype.kind == 'S':
setattr(self, colname, vals.astype('U'))
@staticmethod
@cache.lru_cache(30)
def _get_msid_data_from_cxc_cached(content, tstart, tstop, msid, unit_system):
"""Do the actual work of getting time and values for an MSID from HDF5
files and cache recent results. Caching is very beneficial for derived
parameter updates but not desirable for normal fetch usage."""
return MSID._get_msid_data_from_cxc(content, tstart, tstop, msid, unit_system)
@staticmethod
def _get_msid_data_from_cxc(content, tstart, tstop, msid, unit_system):
"""Do the actual work of getting time and values for an MSID from HDF5
files"""
# Get a row slice into HDF5 file for this content type that picks out
# the required time range plus a little padding on each end.
h5_slice = get_interval(content, tstart, tstop)
# Read the TIME values either from cache or from disk.
if times_cache['key'] == (content, tstart, tstop):
logger.info('Using times_cache for %s %s to %s',
content, tstart, tstop)
times = times_cache['val'] # Already filtered on times_ok
times_ok = times_cache['ok'] # For filtering MSID.val and MSID.bad
times_all_ok = times_cache['all_ok']
else:
ft['msid'] = 'time'
filename = msid_files['msid'].abs
logger.info('Reading %s', filename)
@local_or_remote_function("Getting time data from Ska eng archive server...")
def get_time_data_from_server(h5_slice, filename):
import tables
open_file = getattr(tables, 'open_file', None) or tables.openFile
h5 = open_file(os.path.join(*filename))
times_ok = ~h5.root.quality[h5_slice]
times = h5.root.data[h5_slice]
h5.close()
return times_ok, times
times_ok, times = get_time_data_from_server(h5_slice, _split_path(filename))
# Filter bad times. Last instance of bad times in archive is 2004
# so don't do this unless needed. Creating a new 'times' array is
# much more expensive than checking for np.all(times_ok).
times_all_ok = np.all(times_ok)
if not times_all_ok:
times = times[times_ok]
times_cache.update(dict(key=(content, tstart, tstop),
val=times,
ok=times_ok,
all_ok=times_all_ok))
# Extract the actual MSID values and bad values mask
ft['msid'] = msid
filename = msid_files['msid'].abs
logger.info('Reading %s', filename)
@local_or_remote_function("Getting msid data for " + msid +
" from Ska eng archive server...")
def get_msid_data_from_server(h5_slice, filename):
import tables
open_file = getattr(tables, 'open_file', None) or tables.openFile
h5 = open_file(os.path.join(*filename))
vals = h5.root.data[h5_slice]
bads = h5.root.quality[h5_slice]
h5.close()
return vals, bads
vals, bads = get_msid_data_from_server(h5_slice, _split_path(filename))
# Filter bad times rows if needed
if not times_all_ok:
logger.info('Filtering bad times values for %s', msid)
bads = bads[times_ok]
vals = vals[times_ok]
# Slice down to exact requested time range
row0, row1 = np.searchsorted(times, [tstart, tstop])
logger.info('Slicing %s arrays [%d:%d]', msid, row0, row1)
vals = Units(unit_system).convert(msid.upper(), vals[row0:row1])
times = times[row0:row1]
bads = bads[row0:row1]
# Possibly expand the bads list for a set of about 30 MSIDs which
# have incorrect values in CXCDS telemetry
bads = _fix_ctu_dwell_mode_bads(msid, bads)
# In Python 3+ change bytestring to (unicode) string
if not six.PY2 and vals.dtype.kind == 'S':
vals = vals.astype('U')
return (vals, times, bads)
def _get_msid_data_from_maude(self, content, tstart, tstop, msid, unit_system):
"""
Get time and values for an MSID from MAUDE.
Returned values are (for now) all assumed to be good.
"""
import maude
# Telemetry values from another data_source may already be available. If
# so then only query MAUDE from after the last available point.
telem_already = hasattr(self, 'times') and len(self.times) > 2
if telem_already:
tstart = self.times[-1] + 0.001 # Don't fetch the last point again
dt = self.times[-1] - self.times[-2]
if tstop - tstart < dt * 2:
# Already got enough data from the original query, no need to hit MAUDE
return
# Actually query MAUDE
options = data_source.options()['maude']
try:
out = maude.get_msids(msids=msid, start=tstart, stop=tstop, **options)
except Exception as e:
raise Exception('MAUDE query failed: {}'.format(e))
# Only one MSID is queried from MAUDE but maude.get_msids() already returns
# a list of results, so select the first element.
out = out['data'][0]
vals = Units(unit_system).convert(msid.upper(), out['values'], from_system='eng')
times = out['times']
bads = np.zeros(len(vals), dtype=bool) # No 'bad' values from MAUDE
self.data_source['maude'] = _get_start_stop_dates(times)
self.data_source['maude']['flags'] = out['flags']
if telem_already:
vals = np.concatenate([self.vals, vals])
times = np.concatenate([self.times, times])
bads = np.concatenate([self.bads, bads])
self.vals = vals
self.times = times
self.bads = bads
@property
def state_codes(self):
"""List of state codes tuples (raw_count, state_code) for state-valued
MSIDs
"""
if self.vals.dtype.kind not in ('S', 'U'):
self._state_codes = None
if self.MSID in STATE_CODES:
self._state_codes = STATE_CODES[self.MSID]
if not hasattr(self, '_state_codes'):
import Ska.tdb
try:
states = Ska.tdb.msids[self.MSID].Tsc
except:
self._state_codes = None
else:
if states is None or len(set(states['CALIBRATION_SET_NUM'])) != 1:
warnings.warn('MSID {} has string vals but no state codes '
'or multiple calibration sets'.format(self.msid))
self._state_codes = None
else:
states = np.sort(states.data, order='LOW_RAW_COUNT')
self._state_codes = [(state['LOW_RAW_COUNT'],
state['STATE_CODE']) for state in states]
return self._state_codes
@property
def raw_vals(self):
"""Raw counts corresponding to the string state-code values that are
stored in ``self.vals``
"""
# If this is not a string-type value then there are no raw values
if self.vals.dtype.kind not in ('S', 'U') or self.state_codes is None:
self._raw_vals = None
if not hasattr(self, '_raw_vals'):
self._raw_vals = np.zeros(len(self.vals), dtype='int8') - 1
# CXC state code telem all has same length with trailing spaces
# so find max length for formatting below.
max_len = max(len(x[1]) for x in self.state_codes)
fmtstr = '{:' + str(max_len) + 's}'
for raw_val, state_code in self.state_codes:
ok = self.vals == fmtstr.format(state_code)
self._raw_vals[ok] = raw_val
return self._raw_vals
@property
def tdb(self):
"""Access the Telemetry database entries for this MSID
"""
import Ska.tdb
return Ska.tdb.msids[self.MSID]
def interpolate(self, dt=None, start=None, stop=None, times=None):
"""Perform nearest-neighbor interpolation of the MSID to the specified
time sequence.
The time sequence steps uniformly by ``dt`` seconds starting at the
``start`` time and ending at the ``stop`` time. If not provided the
times default to the first and last times for the MSID.
The MSID ``times`` attribute is set to the common time sequence. In
addition a new attribute ``times0`` is defined that stores the nearest
neighbor interpolated time, providing the *original* timestamps of each
new interpolated value for that MSID.
If ``times`` is provided then this gets used instead of the default linear
progression from ``start`` and ``dt``.
:param dt: time step (sec, default=328.0)
:param start: start of interpolation period (DateTime format)
:param stop: end of interpolation period (DateTime format)
:param times: array of times for interpolation (default=None)
"""
import Ska.Numpy
if times is not None:
if any(kwarg is not None for kwarg in (dt, start, stop)):
raise ValueError('If "times" keyword is set then "dt", "start", '
'and "stop" cannot be set')
# Use user-supplied times that are within the range of telemetry.
ok = (times >= self.times[0]) & (times <= self.times[-1])
times = times[ok]
else:
dt = 328.0 if dt is None else dt
tstart = DateTime(start).secs if start else self.times[0]
tstop = DateTime(stop).secs if stop else self.times[-1]
# Legacy method for backward compatibility. Note that the np.arange()
# call accrues floating point error.
tstart = max(tstart, self.times[0])
tstop = min(tstop, self.times[-1])
times = np.arange(tstart, tstop, dt)
logger.info('Interpolating index for %s', self.msid)
indexes = Ska.Numpy.interpolate(np.arange(len(self.times)),
self.times, times,
method='nearest', sorted=True)
logger.info('Slicing on indexes')
for colname in self.colnames:
colvals = getattr(self, colname)
if colvals is not None:
setattr(self, colname, colvals[indexes])
# Make a new attribute times0 that stores the nearest neighbor
# interpolated times. Then set the MSID times to be the common
# interpolation times.
self.times0 = self.times
self.times = times
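# Sketch of typical use (MSID and dates are illustrative, as in the plotting
# examples elsewhere in this module):
#
#   >>> dat = fetch.MSID('aorate2', '2011:001', '2011:002')
#   >>> dat.interpolate(dt=32.8)      # resample to uniform 32.8 s steps
#   >>> dat.times0[:3]                # original (nearest-neighbor) timestamps
#   >>> dat.times[:3]                 # common, uniformly spaced times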
def copy(self):
from copy import deepcopy
return deepcopy(self)
def filter_bad(self, bads=None, copy=False):
"""Filter out any bad values.
After applying this method the "bads" column will be set to None to
indicate that there are no bad values.
:param bads: Bad values mask. If not supplied then self.bads is used.
:param copy: return a copy of MSID object with bad values filtered
"""
obj = self.copy() if copy else self
# If bad mask is provided then override any existing bad mask for MSID
if bads is not None:
obj.bads = bads
# Nothing to do if bads is None (i.e. bad values already filtered)
if obj.bads is None:
return
if np.any(obj.bads):
logger.info('Filtering bad values for %s', obj.msid)
ok = ~obj.bads
colnames = (x for x in obj.colnames if x != 'bads')
for colname in colnames:
setattr(obj, colname, getattr(obj, colname)[ok])
obj.bads = None
if copy:
return obj
def filter_bad_times(self, start=None, stop=None, table=None, copy=False):
"""Filter out intervals of bad data in the MSID object.
There are three usage options:
- Supply no arguments. This will use the global list of bad times read
in with fetch.read_bad_times().
- Supply both ``start`` and ``stop`` values where each is a single
value in a valid DateTime format.
- Supply a ``table`` parameter in the form of a 2-column table of
start and stop dates (space-delimited) or the name of a file with
data in the same format.
The ``table`` parameter must be supplied as a table or the name of a
table file, for example::
bad_times = ['2008:292:00:00:00 2008:297:00:00:00',
'2008:305:00:12:00 2008:305:00:12:03',
'2010:101:00:01:12 2010:101:00:01:25']
msid.filter_bad_times(table=bad_times)
msid.filter_bad_times(table='msid_bad_times.dat')
:param start: Start of time interval to exclude (any DateTime format)
:param stop: End of time interval to exclude (any DateTime format)
:param table: Two-column table (start, stop) of bad time intervals
:param copy: return a copy of MSID object with bad times filtered
"""
if table is not None:
bad_times = ascii.read(table, format='no_header',
names=['start', 'stop'])
elif start is None and stop is None:
bad_times = []
for msid_glob, times in msid_bad_times.items():
if fnmatch.fnmatch(self.MSID, msid_glob):
bad_times.extend(times)
elif start is None or stop is None:
raise ValueError('filter_bad_times requires either 2 args '
'(start, stop) or no args')
else:
bad_times = [(start, stop)]
obj = self.copy() if copy else self
obj._filter_times(bad_times, exclude=True)
if copy:
return obj
def remove_intervals(self, intervals, copy=False):
"""
Remove telemetry points that occur within the specified ``intervals``
This method is the converse of select_intervals().
The ``intervals`` argument can be either a list of (start, stop) tuples
or an EventQuery object from kadi.
If ``copy`` is set to True then a copy of the MSID object is made prior
to removing intervals, and that copy is returned. The default is to
remove intervals in place.
This example shows fetching the pitch component of the spacecraft rate.
After examining the rates, the samples during maneuvers are then removed
and the standard deviation is recomputed. This filters out the large
rates during maneuvers::
>>> aorate2 = fetch.Msid('aorate2', '2011:001', '2011:002')
>>> aorate2.vals.mean() * 3600 * 180 / np.pi # rate in arcsec/sec
3.9969393528801782
>>> figure(1)
>>> aorate2.plot(',')
>>> from kadi import events
>>> aorate2.remove_intervals(events.manvrs)
>>> aorate2.vals.mean() * 3600 * 180 / np.pi # rate in arcsec/sec
-0.0003688639491030978
>>> figure(2)
>>> aorate2.plot(',')
:param intervals: EventQuery or iterable (N x 2) with start, stop dates/times
:param copy: return a copy of MSID object with intervals removed
"""
obj = self.copy() if copy else self
obj._filter_times(intervals, exclude=True)
if copy:
return obj
def select_intervals(self, intervals, copy=False):
"""
Select telemetry points that occur within the specified ``intervals``
This method is the converse of remove_intervals().
The ``intervals`` argument can be either a list of (start, stop) tuples
or an EventQuery object from kadi.
If ``copy`` is set to True then a copy of the MSID object is made prior
to selecting intervals, and that copy is returned. The default is to
select intervals in place.
This example shows fetching the pitch component of the spacecraft rate.
After examining the rates, the samples during maneuvers are then selected
and the mean is recomputed. This highlights the large rates during
maneuvers::
>>> aorate2 = fetch.Msid('aorate2', '2011:001', '2011:002')
>>> aorate2.vals.mean() * 3600 * 180 / np.pi # rate in arcsec/sec
3.9969393528801782
>>> figure(1)
>>> aorate2.plot(',')
>>> from kadi import events
>>> aorate2.select_intervals(events.manvrs)
>>> aorate2.vals.mean() * 3600 * 180 / np.pi # rate in arcsec/sec
24.764309542605481
>>> figure(2)
>>> aorate2.plot(',')
:param intervals: EventQuery or iterable (N x 2) with start, stop dates/times
:param copy: return a copy of MSID object with intervals selected
"""
obj = self.copy() if copy else self
obj._filter_times(intervals, exclude=False)
if copy:
return obj
def _filter_times(self, intervals, exclude=True):
"""
Filter the times of self based on ``intervals``.
:param intervals: iterable (N x 2) with tstart, tstop in seconds
:param exclude: exclude intervals if True, else include intervals
"""
# Make an initial acceptance mask. If exclude is True then initially
# all values are allowed (ok=True). If exclude is False (i.e. only
# include the interval times) then ok=False everywhere.
ok = np.empty(len(self.times), dtype=bool)
ok[:] = exclude
# See if the input intervals is actually a table of intervals
intervals_list = _get_table_intervals_as_list(intervals, check_overlaps=False)
if intervals_list is not None:
intervals = intervals_list
# Check if this is an EventQuery. Would rather not import EventQuery
# because this is expensive (django), so just look at the names in
# object MRO.
if 'EventQuery' in (cls.__name__ for cls in intervals.__class__.__mro__):
intervals = intervals.intervals(self.datestart, self.datestop)
intervals = [(DateTime(start).secs, DateTime(stop).secs)
for start, stop in intervals]
for tstart, tstop in intervals:
if tstart > tstop:
raise ValueError("Start time %s must be less than stop time %s"
% (tstart, tstop))
if tstop < self.times[0] or tstart > self.times[-1]:
continue
# Find the indexes of bad data. Using side=left,right respectively
# will exclude points exactly equal to the bad_times values
# (though in reality an exact tie is extremely unlikely).
i0 = np.searchsorted(self.times, tstart, side='left')
i1 = np.searchsorted(self.times, tstop, side='right')
ok[i0:i1] = not exclude
colnames = (x for x in self.colnames)
for colname in colnames:
attr = getattr(self, colname)
if isinstance(attr, np.ndarray):
setattr(self, colname, attr[ok])
def write_zip(self, filename, append=False):
"""Write MSID to a zip file named ``filename``
Within the zip archive the data for this MSID will be stored in csv
format with the name <msid_name>.csv.
:param filename: output zipfile name
:param append: append to an existing zipfile
"""
import zipfile
colnames = self.colnames[:]
if self.bads is None and 'bads' in colnames:
colnames.remove('bads')
if self.state_codes:
colnames.append('raw_vals')
# Indexes value is not interesting for output
if 'indexes' in colnames:
colnames.remove('indexes')
colvals = tuple(getattr(self, x) for x in colnames)
fmt = ",".join("%s" for x in colnames)
f = zipfile.ZipFile(filename, ('a' if append
and os.path.exists(filename)
else 'w'))
info = zipfile.ZipInfo(self.msid + '.csv')
info.external_attr = 0o664 << 16 # Set permissions
info.date_time = time.localtime()[:7]
info.compress_type = zipfile.ZIP_DEFLATED
f.writestr(info,
",".join(colnames) + '\n' +
'\n'.join(fmt % x for x in zip(*colvals)) + '\n')
f.close()
def logical_intervals(self, op, val, complete_intervals=True, max_gap=None):
"""Determine contiguous intervals during which the logical comparison
expression "MSID.vals op val" is True. Allowed values for ``op``
are::
== != > < >= <=
If ``complete_intervals`` is True (default) then the intervals are guaranteed to
be complete so that all reported intervals have a transition before and after
within the telemetry interval.
If ``max_gap`` is specified then any time gaps longer than ``max_gap`` are
filled with a fictitious False value to create an artificial interval
boundary at ``max_gap / 2`` seconds from the nearest data value.
Returns a structured array table with a row for each interval.
Columns are:
* datestart: date of interval start
* datestop: date of interval stop
* duration: duration of interval (sec)
* tstart: time of interval start (CXC sec)
* tstop: time of interval stop (CXC sec)
Examples::
>>> dat = fetch.MSID('aomanend', '2010:001', '2010:005')
>>> manvs = dat.logical_intervals('==', 'NEND')
>>> dat = fetch.MSID('61PSTS02', '1999:200', '2000:001')
>>> safe_suns = dat.logical_intervals('==', 'SSM', complete_intervals=False, max_gap=66)
:param op: logical operator, one of == != > < >= <=
:param val: comparison value
:param complete_intervals: return only complete intervals (default=True)
:param max_gap: max allowed gap between time stamps (sec, default=None)
:returns: structured array table of intervals
"""
from . import utils
ops = {'==': operator.eq,
'!=': operator.ne,
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le}
try:
op = ops[op]
except KeyError:
raise ValueError('op = "{}" is not in allowed values: {}'
.format(op, sorted(ops.keys())))
# Do local version of bad value filtering
if self.bads is not None and np.any(self.bads):
ok = ~self.bads
vals = self.vals[ok]
times = self.times[ok]
else:
vals = self.vals
times = self.times
bools = op(vals, val)
return utils.logical_intervals(times, bools, complete_intervals, max_gap)
def state_intervals(self):
"""Determine contiguous intervals during which the MSID value
is unchanged.
Returns a structured array table with a row for each interval.
Columns are:
* datestart: date of interval start
* datestop: date of interval stop
* duration: duration of interval (sec)
* tstart: time of interval start (CXC sec)
* tstop: time of interval stop (CXC sec)
* val: MSID value during the interval
Example::
dat = fetch.MSID('cobsrqid', '2010:001', '2010:005')
obsids = dat.state_intervals()
:returns: structured array table of intervals
"""
from . import utils
# Do local version of bad value filtering
if self.bads is not None and np.any(self.bads):
ok = ~self.bads
vals = self.vals[ok]
times = self.times[ok]
else:
vals = self.vals
times = self.times
if len(self.vals) < 2:
raise ValueError('Filtered data length must be at least 2')
return utils.state_intervals(times, vals)
def iplot(self, fmt='-b', fmt_minmax='-c', **plot_kwargs):
"""Make an interactive plot for exploring the MSID data.
This method opens a new plot figure (or clears the current figure) and
plots the MSID ``vals`` versus ``times``. This plot can be panned or
zoomed arbitrarily and the data values will be fetched from the archive
as needed. Depending on the time scale, ``iplot`` displays either full
resolution, 5-minute, or daily values. For 5-minute and daily values
the min and max values are also plotted.
Once the plot is displayed and the window is selected by clicking in
it, the following key commands are recognized::
a: autoscale for full data range in x and y
m: toggle plotting of min/max values
p: pan at cursor x
y: toggle autoscaling of y-axis
z: zoom at cursor x
?: print help
Example::
dat = fetch.Msid('aoattqt1', '2011:001', '2012:001', stat='5min')
dat.iplot()
dat.iplot('.b', '.c', markersize=0.5)
Caveat: the ``iplot()`` method is not meant for use within scripts, and
may give unexpected results if used in combination with other plotting
commands directed at the same plot figure.
:param fmt: plot format for values (default="-b")
:param fmt_minmax: plot format for mins and maxes (default="-c")
:param plot_kwargs: additional plotting keyword args
"""
from .plot import MsidPlot
self._iplot = MsidPlot(self, fmt, fmt_minmax, **plot_kwargs)
def plot(self, *args, **kwargs):
"""Plot the MSID ``vals`` using Ska.Matplotlib.plot_cxctime()
This is a convenience function for plotting the MSID values. It
is equivalent to::
plot_cxctime(self.times, self.vals, *args, **kwargs)
where ``*args`` are additional arguments and ``**kwargs`` are
additional keyword arguments that are accepted by ``plot_cxctime()``.
Example::
dat = fetch.Msid('tephin', '2011:001', '2012:001', stat='5min')
dat.plot('-r', linewidth=2)
"""
import matplotlib.pyplot as plt
from Ska.Matplotlib import plot_cxctime
vals = self.raw_vals if self.state_codes else self.vals
plot_cxctime(self.times, vals, *args, state_codes=self.state_codes,
**kwargs)
plt.margins(0.02, 0.05)
def __len__(self):
return len(self.times)
class MSIDset(collections.OrderedDict):
"""Fetch a set of MSIDs from the engineering telemetry archive.
Each input ``msid`` is case-insensitive and can include linux file "glob"
patterns, for instance ``orb*1*_?`` (ORBITEPHEM1_X, Y and Z) or
``aoattqt[1234]`` (AOATTQT1, 2, 3, and 4). For derived parameters the
initial ``DP_`` is optional, for instance ``dpa_pow*`` (DP_DPA_POWER).
:param msids: list of MSID names (case-insensitive)
:param start: start date of telemetry (Chandra.Time compatible)
:param stop: stop date of telemetry (current time if not supplied)
:param filter_bad: automatically filter out bad values
:param stat: return 5-minute or daily statistics ('5min' or 'daily')
:returns: Dict-like object containing MSID instances keyed by MSID name
"""
MSID = MSID
def __init__(self, msids, start=LAUNCH_DATE, stop=None, filter_bad=False, stat=None):
super(MSIDset, self).__init__()
intervals = _get_table_intervals_as_list(start, check_overlaps=True)
if intervals is not None:
start, stop = intervals[0][0], intervals[-1][1]
self.tstart = DateTime(start).secs
self.tstop = (DateTime(stop).secs if stop else DateTime().secs)
self.datestart = DateTime(self.tstart).date
self.datestop = DateTime(self.tstop).date
# Input ``msids`` may contain globs, so expand each and add to new list
new_msids = []
for msid in msids:
new_msids.extend(msid_glob(msid)[0])
for msid in new_msids:
if intervals is None:
self[msid] = self.MSID(msid, self.tstart, self.tstop,
filter_bad=False, stat=stat)
else:
self[msid] = self.MSID(msid, intervals, filter_bad=False, stat=stat)
if filter_bad:
self.filter_bad()
def __deepcopy__(self, memo=None):
out = self.__class__([], None)
for attr in ('tstart', 'tstop', 'datestart', 'datestop'):
setattr(out, attr, getattr(self, attr))
for msid in self:
out[msid] = self[msid].copy()
return out
def copy(self):
return self.__deepcopy__()
def filter_bad(self, copy=False):
"""Filter bad values for the MSID set.
This function applies the union (logical-OR) of bad value masks for all
MSIDs in the set with the same content type. The result is that the
filtered MSID samples are valid for *all* MSIDs within the
content type and the arrays all match up.
For example::
msids = fetch.MSIDset(['aorate1', 'aorate2', 'aogyrct1', 'aogyrct2'],
'2009:001', '2009:002')
msids.filter_bad()
Since ``aorate1`` and ``aorate2`` both have content type of
``pcad3eng`` they will be filtered as a group and will remain with the
same sampling. This will allow something like::
plot(msids['aorate1'].vals, msids['aorate2'].vals)
Likewise the two gyro count MSIDs would be filtered as a group. If
this group-filtering is not the desired behavior one can always call
the individual MSID.filter_bad() function for each MSID in the set::
for msid in msids.values():
msid.filter_bad()
:param copy: return a copy of MSID object with intervals selected
"""
obj = self.copy() if copy else self
for content in set(x.content for x in obj.values()):
bads = None
msids = [x for x in obj.values()
if x.content == content and x.bads is not None]
for msid in msids:
if bads is None:
bads = msid.bads.copy()
else:
bads |= msid.bads
for msid in msids:
msid.filter_bad(bads)
if copy:
return obj
def filter_bad_times(self, start=None, stop=None, table=None, copy=False):
"""Filter out intervals of bad data in the MSIDset object.
There are three usage options:
- Supply no arguments. This will use the global list of bad times read
in with fetch.read_bad_times().
- Supply both ``start`` and ``stop`` values where each is a single
value in a valid DateTime format.
- Supply a ``table`` parameter in the form of a 2-column table of
start and stop dates (space-delimited) or the name of a file with
data in the same format.
The ``table`` parameter must be supplied as a table or the name of a
table file, for example::
msidset.filter_bad_times()
bad_times = ['2008:292:00:00:00 2008:297:00:00:00',
'2008:305:00:12:00 2008:305:00:12:03',
'2010:101:00:01:12 2010:101:00:01:25']
msidset.filter_bad_times(table=bad_times)
msidset.filter_bad_times(table='msid_bad_times.dat')
:param start: Start of time interval to exclude (any DateTime format)
:param stop: End of time interval to exclude (any DateTime format)
:param table: Two-column table (start, stop) of bad time intervals
:param copy: return a copy of MSID object with intervals selected
"""
obj = self.copy() if copy else self
for msid in obj.values():
msid.filter_bad_times(start, stop, table)
if copy:
return obj
def interpolate(self, dt=None, start=None, stop=None, filter_bad=True, times=None,
bad_union=False, copy=False):
"""
Perform nearest-neighbor interpolation of all MSID values in the set
to a common time sequence. The values are updated in-place.
**Times**
The time sequence steps uniformly by ``dt`` seconds starting at the
``start`` time and ending at the ``stop`` time. If not provided the
times default to the ``start`` and ``stop`` times for the MSID set.
If ``times`` is provided then this gets used instead of the default linear
progression from ``start`` and ``dt``.
For each MSID in the set the ``times`` attribute is set to the common
time sequence. In addition a new attribute ``times0`` is defined that
stores the nearest neighbor interpolated time, providing the *original*
timestamps of each new interpolated value for that MSID.
**Filtering and bad values**
If ``filter_bad`` is True (default) then bad values are filtered from
the interpolated MSID set. There are two strategies for doing this:
1) ``bad_union = False``
Remove the bad values in each MSID prior to interpolating the set to
a common time series. This essentially says to use all the available
data individually. Each MSID has bad data filtered individually
*before* interpolation so that the nearest neighbor interpolation only
finds good data. This strategy is used when ``bad_union = False``,
which is the default setting.
2) ``bad_union = True``
Mark every MSID in the set as bad at the interpolated time if *any*
of them are bad at that time. This stricter version is required when it
is important that the MSIDs be truly correlated in time. For instance
this is needed for attitude quaternions since all four values must be
from the exact same telemetry sample. If you are not sure, this is the
safer option.
:param dt: time step (sec, default=328.0)
:param start: start of interpolation period (DateTime format)
:param stop: end of interpolation period (DateTime format)
:param filter_bad: filter bad values
:param times: array of times for interpolation (default=None)
:param bad_union: filter union of bad values after interpolating
:param copy: return a new copy instead of in-place update (default=False)
"""
import Ska.Numpy
obj = self.copy() if copy else self
msids = list(obj.values()) # MSID objects in the MSIDset
# Ensure that tstart / tstop is entirely within the range of available
# data fetched from the archive.
max_fetch_tstart = max(msid.times[0] for msid in msids)
min_fetch_tstop = min(msid.times[-1] for msid in msids)
if times is not None:
if any(kwarg is not None for kwarg in (dt, start, stop)):
raise ValueError('If "times" keyword is set then "dt", "start", '
'and "stop" cannot be set')
# Use user-supplied times that are within the range of telemetry.
ok = (times >= max_fetch_tstart) & (times <= min_fetch_tstop)
obj.times = times[ok]
else:
# Get the nominal tstart / tstop range
dt = 328.0 if dt is None else dt
tstart = DateTime(start).secs if start else obj.tstart
tstop = DateTime(stop).secs if stop else obj.tstop
tstart = max(tstart, max_fetch_tstart)
tstop = min(tstop, min_fetch_tstop)
obj.times = np.arange((tstop - tstart) // dt + 1) * dt + tstart
for msid in msids:
if filter_bad and not bad_union:
msid.filter_bad()
logger.info('Interpolating index for %s', msid.msid)
indexes = Ska.Numpy.interpolate(np.arange(len(msid.times)),
msid.times, obj.times,
method='nearest', sorted=True)
logger.info('Slicing on indexes')
for colname in msid.colnames:
colvals = getattr(msid, colname)
if colvals is not None:
setattr(msid, colname, colvals[indexes])
# Make a new attribute times0 that stores the nearest neighbor
# interpolated times. Then set the MSID times to be the common
# interpolation times.
msid.times0 = msid.times
msid.times = obj.times
if bad_union:
common_bads = np.zeros(len(obj.times), dtype=bool)
for msid in msids:
if msid.stat is None and msid.bads is None:
warnings.warn('WARNING: {!r} MSID has bad values already filtered.\n'
'This prevents `bad_union` filtering from working as expected.\n'
'Use MSIDset (not Msidset) with filter_bad=False.\n'
.format(msid.msid))
if msid.bads is not None: # 5min and daily stats have no bad values
common_bads |= msid.bads
# Apply the common bads array and optional filter out these bad values
for msid in msids:
msid.bads = common_bads
if filter_bad:
msid.filter_bad()
# Filter MSIDset-level times attr to match individual MSIDs if filter_bad is True
if filter_bad:
obj.times = obj.times[~common_bads]
if copy:
return obj
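# Sketch (illustrative MSIDs): interpolating the four attitude quaternion
# components with bad_union=True keeps only samples where all four values are
# good, so they remain truly correlated in time:
#
#   >>> dat = fetch.MSIDset(['aoattqt1', 'aoattqt2', 'aoattqt3', 'aoattqt4'],
#   ...                     '2011:001', '2011:002', filter_bad=False)
#   >>> dat.interpolate(dt=2.05, bad_union=True)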
def write_zip(self, filename):
"""Write MSIDset to a zip file named ``filename``
Within the zip archive the data for each MSID in the set will be stored
in csv format with the name <msid_name>.csv.
:param filename: output zipfile name
"""
append = False
for msid in self.values():
msid.write_zip(filename, append=append)
append = True
class Msid(MSID):
"""
Fetch data from the engineering telemetry archive into an MSID object.
Same as MSID class but with filter_bad=True by default.
:param msid: name of MSID (case-insensitive)
:param start: start date of telemetry (Chandra.Time compatible)
:param stop: stop date of telemetry (current time if not supplied)
:param filter_bad: automatically filter out bad values
:param stat: return 5-minute or daily statistics ('5min' or 'daily')
:param unit_system: Unit system (cxc|eng|sci, default=current units)
:returns: MSID instance
"""
units = UNITS
def __init__(self, msid, start=LAUNCH_DATE, stop=None, filter_bad=True, stat=None):
super(Msid, self).__init__(msid, start=start, stop=stop,
filter_bad=filter_bad, stat=stat)
class Msidset(MSIDset):
"""Fetch a set of MSIDs from the engineering telemetry archive.
Same as MSIDset class but with filter_bad=True by default.
:param msids: list of MSID names (case-insensitive)
:param start: start date of telemetry (Chandra.Time compatible)
:param stop: stop date of telemetry (current time if not supplied)
:param filter_bad: automatically filter out bad values
:param stat: return 5-minute or daily statistics ('5min' or 'daily')
:param unit_system: Unit system (cxc|eng|sci, default=current units)
:returns: Dict-like object containing MSID instances keyed by MSID name
"""
MSID = MSID
def __init__(self, msids, start=LAUNCH_DATE, stop=None, filter_bad=True, stat=None):
super(Msidset, self).__init__(msids, start=start, stop=stop,
filter_bad=filter_bad, stat=stat)
class HrcSsMsid(Msid):
"""
Fetch data from the engineering telemetry archive into an MSID object.
Same as MSID class but with filter_bad=True by default.
:param msid: name of MSID (case-insensitive)
:param start: start date of telemetry (Chandra.Time compatible)
:param stop: stop date of telemetry (current time if not supplied)
:param filter_bad: automatically filter out bad values
:param stat: return 5-minute or daily statistics ('5min' or 'daily')
:param unit_system: Unit system (cxc|eng|sci, default=current units)
:returns: MSID instance
"""
units = UNITS
def __new__(self, msid, start=LAUNCH_DATE, stop=None, stat=None):
ss_msids = '2TLEV1RT 2VLEV1RT 2SHEV1RT 2TLEV2RT 2VLEV2RT 2SHEV2RT'
if msid.upper() not in ss_msids.split():
raise ValueError('MSID {} is not in HRC secondary science ({})'
.format(msid, ss_msids))
# If this is not full-resolution then add boolean bads mask to individual MSIDs
msids = [msid, 'HRC_SS_HK_BAD']
out = MSIDset(msids, start=start, stop=stop, stat=stat)
if stat is not None:
for m in msids:
out[m].bads = np.zeros(len(out[m].vals), dtype=bool)
# Set bad mask
i_bads = np.flatnonzero(out['HRC_SS_HK_BAD'].vals > 0)
out['HRC_SS_HK_BAD'].bads[i_bads] = True
# For full-resolution smear the bad mask out by +/- 5 samples
if stat is None:
for i_bad in i_bads:
i0 = max(0, i_bad - 5)
i1 = i_bad + 5
out['HRC_SS_HK_BAD'].bads[i0:i1] = True
# Finally interpolate and filter out bad values
out.interpolate(times=out[msid].times, bad_union=True, filter_bad=True)
return out[msid]
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
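# Minimal usage sketch of the memoized decorator (the decorated function is
# purely illustrative):
#
#   @memoized
#   def slow_square(x):
#       return x * x
#
#   slow_square(4)    # computed and cached
#   slow_square(4)    # returned from the cache
#   slow_square([4])  # list argument is unhashable -> computed directly, not cached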
def get_time_range(msid, format=None):
"""
Get the time range for the given ``msid``.
:param msid: MSID name
:param format: Output format (DateTime format, e.g. 'secs', 'date', 'greta')
:returns: (tstart, tstop) in CXC seconds, or in ``format`` if supplied
"""
MSID = msid.upper()
with _cache_ft():
ft['content'] = content[MSID]
ft['msid'] = 'time'
filename = msid_files['msid'].abs
logger.info('Reading %s', filename)
@local_or_remote_function("Getting time range from Ska eng archive server...")
def get_time_data_from_server(filename):
import tables
open_file = getattr(tables, 'open_file', None) or tables.openFile
h5 = open_file(os.path.join(*filename))
tstart = h5.root.data[0]
tstop = h5.root.data[-1]
h5.close()
return tstart, tstop
if filename in CONTENT_TIME_RANGES:
tstart, tstop = CONTENT_TIME_RANGES[filename]
else:
tstart, tstop = get_time_data_from_server(_split_path(filename))
CONTENT_TIME_RANGES[filename] = (tstart, tstop)
if format is not None:
tstart = getattr(DateTime(tstart), format)
tstop = getattr(DateTime(tstop), format)
return tstart, tstop
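# Sketch (illustrative MSID): the full span of available TEPHIN telemetry as
# date strings would be obtained with get_time_range('tephin', format='date').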
def get_telem(msids, start=None, stop=None, sampling='full', unit_system='eng',
interpolate_dt=None, remove_events=None, select_events=None,
time_format=None, outfile=None, quiet=False,
max_fetch_Mb=1000, max_output_Mb=100):
"""
High-level routine to get telemetry for one or more MSIDs and perform
common processing functions:
- Fetch a set of MSIDs over a time range, specifying the sampling as
either full-resolution, 5-minute, or daily data.
- Filter out bad or missing data.
- Interpolate (resample) all MSID values to a common uniformly-spaced time sequence.
- Remove or select time intervals corresponding to specified Kadi event types.
- Change the time format from CXC seconds (seconds since 1998.0) to something more
convenient like GRETA time.
- Write the MSID telemetry data to a zipfile.
:param msids: MSID(s) to fetch (string or list of strings)
:param start: Start time for data fetch (default=<stop> - 30 days)
:param stop: Stop time for data fetch (default=NOW)
:param sampling: Data sampling (full | 5min | daily) (default=full)
:param unit_system: Unit system for data (eng | sci | cxc) (default=eng)
:param interpolate_dt: Interpolate to uniform time steps (secs, default=None)
:param remove_events: Remove kadi events expression (default=None)
:param select_events: Select kadi events expression (default=None)
:param time_format: Output time format (secs|date|greta|jd|..., default=secs)
:param outfile: Output file name (default=None)
:param quiet: Suppress run-time logging output (default=False)
:param max_fetch_Mb: Max allowed memory (Mb) for fetching (default=1000)
:param max_output_Mb: Max allowed memory (Mb) for file output (default=100)
:returns: MSIDset object
"""
from .get_telem import get_telem
return get_telem(msids, start, stop, sampling, unit_system,
interpolate_dt, remove_events, select_events,
time_format, outfile, quiet,
max_fetch_Mb, max_output_Mb)
@memoized
def get_interval(content, tstart, tstop):
"""
Get the approximate row intervals that enclose the specified ``tstart`` and
``tstop`` times for the ``content`` type.
:param content: content type (e.g. 'pcad3eng', 'thm1eng')
:param tstart: start time (CXC seconds)
:param tstop: stop time (CXC seconds)
:returns: rowslice
"""
ft['content'] = content
@local_or_remote_function("Getting interval data from " +
"DB on Ska eng archive server...")
def get_interval_from_db(tstart, tstop, server):
import Ska.DBI
db = Ska.DBI.DBI(dbi='sqlite', server=os.path.join(*server))
query_row = db.fetchone('SELECT tstart, rowstart FROM archfiles '
'WHERE filetime < ? order by filetime desc',
(tstart,))
if not query_row:
query_row = db.fetchone('SELECT tstart, rowstart FROM archfiles '
'order by filetime asc')
rowstart = query_row['rowstart']
query_row = db.fetchone('SELECT tstop, rowstop FROM archfiles '
'WHERE filetime > ? order by filetime asc',
(tstop,))
if not query_row:
query_row = db.fetchone('SELECT tstop, rowstop FROM archfiles '
'order by filetime desc')
rowstop = query_row['rowstop']
return slice(rowstart, rowstop)
return get_interval_from_db(tstart, tstop, _split_path(msid_files['archfiles'].abs))
@contextlib.contextmanager
def _cache_ft():
"""
Cache the global filetype ``ft`` context variable so that fetch operations
do not corrupt user values of ``ft``.
"""
ft_cache_pickle = pickle.dumps(ft)
try:
yield
finally:
ft_cache = pickle.loads(ft_cache_pickle)
ft.update(ft_cache)
delkeys = [x for x in ft if x not in ft_cache]
for key in delkeys:
del ft[key]
@contextlib.contextmanager
def _set_msid_files_basedir(datestart, msid_files=msid_files):
"""
If datestart is before 2000:001:00:00:00 then use the 1999 archive files.
"""
try:
cache_basedir = msid_files.basedir
if datestart < DATE2000_LO:
# Note: don't use os.path.join because ENG_ARCHIVE and basedir must
# use linux '/' convention but this might be running on Windows.
dirs = msid_files.basedir.split(':')
msid_files.basedir = ':'.join(dir_ + '/1999' for dir_ in dirs)
yield
finally:
msid_files.basedir = cache_basedir
def _fix_ctu_dwell_mode_bads(msid, bads):
"""
Because of an issue related to the placement of the dwell mode flag, MSIDs that get
stepped on in dwell mode get a bad value at the beginning of a dwell mode, while the
dwell mode values (DWELLnn) get a bad value at the end. This does a simple
brute-force fix of expanding any section of bad values by one sample in the
appropriate direction.
"""
MSID = msid.upper()
stepped_on_msids = ('4PRT5BT', '4RT585T', 'AFLCA3BI', 'AIRU1BT', 'CSITB5V',
'CUSOAOVN', 'ECNV3V', 'PLAED4ET', 'PR1TV01T', 'TCYLFMZM',
'TOXTSUPN', 'ES1P5CV', 'ES2P5CV')
if MSID in stepped_on_msids or re.match(r'DWELL\d\d', MSID):
# Find transitions from good value to bad value. Turn that
# good value to bad to extend the badness by one sample.
ok = ~bads[:-1] & bads[1:]
bads[:-1][ok] = True
return bads
def add_logging_handler(level=logging.INFO,
formatter=None,
handler=None):
"""Configure logging for fetch module.
:param level: logging level (logging.DEBUG, logging.INFO, etc)
:param formatter: logging.Formatter (default: Formatter('%(funcName)s: %(message)s'))
:param handler: logging.Handler (default: StreamHandler())
"""
if formatter is None:
formatter = logging.Formatter('%(funcName)s: %(message)s')
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(handler)
def _plural(x):
"""Return English plural of ``x``. Super-simple and only valid for the
known small set of cases within fetch where it will get applied.
"""
return x + 'es' if (x.endswith('x') or x.endswith('s')) else x + 's'
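# Examples of the intended behavior: _plural('val') -> 'vals',
# _plural('max') -> 'maxes', _plural('std') -> 'stds'.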
| [] | [] | [
"SKA",
"ENG_ARCHIVE"
] | [] | ["SKA", "ENG_ARCHIVE"] | python | 2 | 0 | |
internal/git/fetch_scanner.go | package git
import (
"bufio"
"bytes"
"io"
"strings"
)
// RefUpdateType represents the type of update a FetchStatusLine is. The
// valid types are documented here: https://git-scm.com/docs/git-fetch/2.30.0#Documentation/git-fetch.txt-flag
type RefUpdateType byte
// Valid checks whether the RefUpdateType is one of the seven valid types of update
func (t RefUpdateType) Valid() bool {
_, ok := validRefUpdateTypes[t]
return ok
}
// FetchStatusLine represents a line of status output from `git fetch`, as
// documented here: https://git-scm.com/docs/git-fetch/2.30.0#_output. Each
// line is a change to a git reference in the local repository that was caused
// by the fetch
type FetchStatusLine struct {
// Type encodes the kind of change that git fetch has made
Type RefUpdateType
// Summary is a brief description of the change. This may be text such as
// [new tag], or a compact-form SHA range showing the old and new values of
// the updated reference, depending on the type of update
Summary string
// From is usually the name of the remote ref being fetched from, missing
// the refs/<type>/ prefix. If a ref is being deleted, this will be "(none)"
From string
// To is the name of the local ref being updated, missing the refs/<type>/
// prefix.
To string
// Reason optionally contains human-readable information about the change. It
// is typically used to explain why making a given change failed (e.g., the
// type will be RefUpdateTypeUpdateFailed). It may be empty.
Reason string
}
const (
// RefUpdateTypeFastForwardUpdate represents a 'fast forward update' fetch status line
RefUpdateTypeFastForwardUpdate RefUpdateType = ' '
// RefUpdateTypeForcedUpdate represents a 'forced update' fetch status line
RefUpdateTypeForcedUpdate RefUpdateType = '+'
// RefUpdateTypePruned represents a 'pruned' fetch status line
RefUpdateTypePruned RefUpdateType = '-'
// RefUpdateTypeTagUpdate represents a 'tag update' fetch status line
RefUpdateTypeTagUpdate RefUpdateType = 't'
// RefUpdateTypeFetched represents a 'fetched' fetch status line. This
// indicates that a new reference has been created in the local repository
RefUpdateTypeFetched RefUpdateType = '*'
// RefUpdateTypeUpdateFailed represents an 'update failed' fetch status line
RefUpdateTypeUpdateFailed RefUpdateType = '!'
// RefUpdateTypeUnchanged represents an 'unchanged' fetch status line
RefUpdateTypeUnchanged RefUpdateType = '='
)
var validRefUpdateTypes = map[RefUpdateType]struct{}{
RefUpdateTypeFastForwardUpdate: {},
RefUpdateTypeForcedUpdate: {},
RefUpdateTypePruned: {},
RefUpdateTypeTagUpdate: {},
RefUpdateTypeFetched: {},
RefUpdateTypeUpdateFailed: {},
RefUpdateTypeUnchanged: {},
}
// IsTagAdded returns true if this status line indicates a new tag was added
func (f FetchStatusLine) IsTagAdded() bool {
return f.Type == RefUpdateTypeFetched && f.Summary == "[new tag]"
}
// IsTagUpdated returns true if this status line indicates a tag was changed
func (f FetchStatusLine) IsTagUpdated() bool {
return f.Type == RefUpdateTypeTagUpdate
}
// FetchScanner scans the output of `git fetch`, allowing information about
// the updated refs to be gathered
type FetchScanner struct {
scanner *bufio.Scanner
lastLine FetchStatusLine
}
// NewFetchScanner returns a new FetchScanner
func NewFetchScanner(r io.Reader) *FetchScanner {
return &FetchScanner{scanner: bufio.NewScanner(r)}
}
// Scan looks for the next fetch status line in the reader supplied to
// NewFetchScanner(). Any lines that are not valid status lines are discarded
// without error. It returns true if you should call Scan() again, and false if
// scanning has come to an end.
func (f *FetchScanner) Scan() bool {
for f.scanner.Scan() {
// Silently ignore non-matching lines
line, ok := parseFetchStatusLine(f.scanner.Bytes())
if !ok {
continue
}
f.lastLine = line
return true
}
return false
}
// Err returns any error encountered while scanning the reader supplied to
// NewFetchScanner(). Note that lines not matching the expected format are not
// an error.
func (f *FetchScanner) Err() error {
return f.scanner.Err()
}
// StatusLine returns the most recent fetch status line encountered by the
// FetchScanner. It changes after each call to Scan(), unless there is an error.
func (f *FetchScanner) StatusLine() FetchStatusLine {
return f.lastLine
}
// parseFetchStatusLine parses lines outputted by git-fetch(1), which are expected
// to be in the format " <flag> <summary> <from> -> <to> [<reason>]"
func parseFetchStatusLine(line []byte) (FetchStatusLine, bool) {
var blank FetchStatusLine
var out FetchStatusLine
// Handle the flag very strictly, since status and non-status text mingle
if len(line) < 4 || line[0] != ' ' || line[2] != ' ' {
return blank, false
}
out.Type, line = RefUpdateType(line[1]), line[3:]
if !out.Type.Valid() {
return blank, false
}
// Get the summary, which may be composed of multiple words
if line[0] == '[' {
end := bytes.IndexByte(line, ']')
if end < 0 || len(line) <= end+2 {
return blank, false
}
out.Summary, line = string(line[0:end+1]), line[end+1:]
} else {
end := bytes.IndexByte(line, ' ')
if end < 0 || len(line) <= end+1 {
return blank, false
}
out.Summary, line = string(line[0:end]), line[end:]
}
// We're now scanning the "<from> -> <to>" part, where "<from>" is the remote branch name
// while "<to>" is the local branch name transformed by the refspec. As branches cannot
// contain whitespace, it's fine to scan by word now.
scanner := bufio.NewScanner(bytes.NewReader(line))
scanner.Split(bufio.ScanWords)
// From field
if !scanner.Scan() {
return blank, false
}
out.From = scanner.Text()
// Hardcoded -> delimiter
if !scanner.Scan() || !bytes.Equal(scanner.Bytes(), []byte("->")) {
return blank, false
}
// To field
if !scanner.Scan() {
return blank, false
}
out.To = scanner.Text()
// Reason field - optional, the rest of the line. This implementation will
// squeeze multiple spaces into one, but that shouldn't be a big problem
var reason []string
for scanner.Scan() {
reason = append(reason, scanner.Text())
}
out.Reason = strings.Join(reason, " ")
return out, true
}
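// Illustrative sketch (the status line below is a representative example, not
// output captured from a real fetch): scanning the line
//
//	" * [new tag]         v1.2.3     -> v1.2.3"
//
// produces a FetchStatusLine with Type=RefUpdateTypeFetched,
// Summary="[new tag]", From="v1.2.3", To="v1.2.3" and an empty Reason, so
// IsTagAdded() reports true for it.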
| [] | [] | [] | [] | [] | go | null | null | null |
tests/template_test.go | package tests
import (
"fmt"
"os"
"strings"
"testing"
ptesting "github.com/pulumi/pulumi/pkg/testing"
"github.com/pulumi/pulumi/pkg/testing/integration"
"github.com/pulumi/pulumi/pkg/workspace"
"github.com/stretchr/testify/assert"
)
func TestTemplates(t *testing.T) {
blackListedTests := os.Getenv("BLACK_LISTED_TESTS")
awsRegion := os.Getenv("AWS_REGION")
if awsRegion == "" {
awsRegion = "us-west-1"
fmt.Println("Defaulting AWS_REGION to 'us-west-1'. You can override using the AWS_REGION environment variable")
}
azureEnviron := os.Getenv("ARM_ENVIRONMENT")
if azureEnviron == "" {
azureEnviron = "public"
fmt.Println("Defaulting ARM_ENVIRONMENT to 'public'. You can override using the ARM_ENVIRONMENT variable")
}
azureLocation := os.Getenv("ARM_LOCATION")
if azureLocation == "" {
azureLocation = "westus"
fmt.Println("Defaulting ARM_LOCATION to 'westus'. You can override using the ARM_LOCATION variable")
}
gcpProject := os.Getenv("GOOGLE_PROJECT")
if gcpProject == "" {
gcpProject = "pulumi-ci-gcp-provider"
fmt.Println("Defaulting GOOGLE_PROJECT to 'pulumi-ci-gcp-provider'." +
"You can override using the GOOGLE_PROJECT variable")
}
gcpRegion := os.Getenv("GOOGLE_REGION")
if gcpRegion == "" {
gcpRegion = "us-central1"
fmt.Println("Defaulting GOOGLE_REGION to 'us-central1'. You can override using the GOOGLE_REGION variable")
}
gcpZone := os.Getenv("GOOGLE_ZONE")
if gcpZone == "" {
gcpZone = "us-central1-a"
fmt.Println("Defaulting GOOGLE_ZONE to 'us-central1-a'. You can override using the GOOGLE_ZONE variable")
}
base := integration.ProgramTestOptions{
Tracing: "https://tracing.pulumi-engineering.com/collector/api/v1/spans",
ExpectRefreshChanges: true,
Quick: true,
SkipRefresh: true,
NoParallel: true, // we mark tests as Parallel manually when instantiating
}
// Retrieve the template repo.
repo, err := workspace.RetrieveTemplates("", false /*offline*/, workspace.TemplateKindPulumiProject)
assert.NoError(t, err)
defer assert.NoError(t, repo.Delete())
// List the templates from the repo.
templates, err := repo.Templates()
assert.NoError(t, err)
blackListed := strings.Split(blackListedTests, ",")
for _, template := range templates {
templateName := template.Name
t.Run(templateName, func(t *testing.T) {
t.Parallel()
if isBlackListedTest(templateName, blackListed) {
t.Skipf("Skipping template test %s", templateName)
return
}
t.Logf("Starting test run for %q", templateName)
e := ptesting.NewEnvironment(t)
defer deleteIfNotFailed(e)
e.RunCommand("pulumi", "new", templateName, "-f", "-s", "template-test")
path, err := workspace.DetectProjectPathFrom(e.RootPath)
assert.NoError(t, err)
assert.NotEmpty(t, path)
_, err = workspace.LoadProject(path)
assert.NoError(t, err)
example := base.With(integration.ProgramTestOptions{
Dir: e.RootPath,
Config: map[string]string{
"aws:region": awsRegion,
"azure:environment": azureEnviron,
"azure:location": azureLocation,
"gcp:project": gcpProject,
"gcp:region": gcpRegion,
"gcp:zone": gcpZone,
"cloud:provider": "aws",
},
})
integration.ProgramTest(t, &example)
})
}
}
// deleteIfNotFailed deletes the files in the testing environment if the testcase has
// not failed. (Otherwise they are left to aid debugging.)
func deleteIfNotFailed(e *ptesting.Environment) {
if !e.T.Failed() {
e.DeleteEnvironment()
}
}
func isBlackListedTest(templateName string, blackListedTests []string) bool {
for _, blackListed := range blackListedTests {
if strings.Contains(templateName, blackListed) {
return true
}
}
return false
}
| [
"\"BLACK_LISTED_TESTS\"",
"\"AWS_REGION\"",
"\"ARM_ENVIRONMENT\"",
"\"ARM_LOCATION\"",
"\"GOOGLE_PROJECT\"",
"\"GOOGLE_REGION\"",
"\"GOOGLE_ZONE\""
] | [] | [
"BLACK_LISTED_TESTS",
"GOOGLE_REGION",
"GOOGLE_ZONE",
"AWS_REGION",
"GOOGLE_PROJECT",
"ARM_ENVIRONMENT",
"ARM_LOCATION"
] | [] | ["BLACK_LISTED_TESTS", "GOOGLE_REGION", "GOOGLE_ZONE", "AWS_REGION", "GOOGLE_PROJECT", "ARM_ENVIRONMENT", "ARM_LOCATION"] | go | 7 | 0 | |
examples/msal-go/token_credential.go | package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
"github.com/pkg/errors"
"k8s.io/klog/v2"
)
// authResult contains the subset of results from token acquisition operation in ConfidentialClientApplication
// For details see https://aka.ms/msal-net-authenticationresult
type authResult struct {
accessToken string
expiresOn time.Time
grantedScopes []string
declinedScopes []string
}
func clientAssertionBearerAuthorizerCallback(tenantID, resource string) (*autorest.BearerAuthorizer, error) {
// Azure AD Workload Identity webhook will inject the following env vars
// AZURE_CLIENT_ID with the clientID set in the service account annotation
// AZURE_TENANT_ID with the tenantID set in the service account annotation. If not defined, then
// the tenantID provided via azure-wi-webhook-config for the webhook will be used.
// AZURE_FEDERATED_TOKEN_FILE is the service account token path
// AZURE_AUTHORITY_HOST is the AAD authority hostname
clientID := os.Getenv("AZURE_CLIENT_ID")
tokenFilePath := os.Getenv("AZURE_FEDERATED_TOKEN_FILE")
authorityHost := os.Getenv("AZURE_AUTHORITY_HOST")
// generate a token using the msal confidential client
// this will always generate a new token request to AAD
// TODO (aramase) consider using acquire token silent (https://github.com/Azure/azure-workload-identity/issues/76)
// read the service account token from the filesystem
signedAssertion, err := readJWTFromFS(tokenFilePath)
if err != nil {
klog.ErrorS(err, "failed to read the service account token from the filesystem")
return nil, errors.Wrap(err, "failed to read service account token")
}
cred, err := confidential.NewCredFromAssertion(signedAssertion)
if err != nil {
klog.ErrorS(err, "failed to create credential from signed assertion")
return nil, errors.Wrap(err, "failed to create confidential creds")
}
// create the confidential client to request an AAD token
confidentialClientApp, err := confidential.New(
clientID,
cred,
confidential.WithAuthority(fmt.Sprintf("%s%s/oauth2/token", authorityHost, tenantID)))
if err != nil {
klog.ErrorS(err, "failed to create confidential client")
return nil, errors.Wrap(err, "failed to create confidential client app")
}
// trim the suffix / if exists
resource = strings.TrimSuffix(resource, "/")
// .default needs to be added to the scope
if !strings.HasSuffix(resource, ".default") {
resource += "/.default"
}
result, err := confidentialClientApp.AcquireTokenByCredential(context.Background(), []string{resource})
if err != nil {
klog.ErrorS(err, "failed to acquire token")
return nil, errors.Wrap(err, "failed to acquire token")
}
return autorest.NewBearerAuthorizer(authResult{
accessToken: result.AccessToken,
expiresOn: result.ExpiresOn,
grantedScopes: result.GrantedScopes,
declinedScopes: result.DeclinedScopes,
}), nil
}
// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
func (ar authResult) OAuthToken() string {
return ar.accessToken
}
// readJWTFromFS reads the jwt from file system
func readJWTFromFS(tokenFilePath string) (string, error) {
token, err := os.ReadFile(tokenFilePath)
if err != nil {
return "", err
}
return string(token), nil
}
| [
"\"AZURE_CLIENT_ID\"",
"\"AZURE_FEDERATED_TOKEN_FILE\"",
"\"AZURE_AUTHORITY_HOST\""
] | [] | [
"AZURE_CLIENT_ID",
"AZURE_AUTHORITY_HOST",
"AZURE_FEDERATED_TOKEN_FILE"
] | [] | ["AZURE_CLIENT_ID", "AZURE_AUTHORITY_HOST", "AZURE_FEDERATED_TOKEN_FILE"] | go | 3 | 0 | |
ytplayer_pkg/server.py | # import pafy # https://github.com/mps-youtube/pafy
# import vlc
# import urllib
# import json
# from slackeventsapi import SlackEventAdapter
# from slack import WebClient
# from slack_webhook import Slack
# from ytplayer_pkg.youtube_lib import YouTubePlayer, YouTubeVideo
# from urllib.parse import urlencode
# import threading
# from flask import Flask
# from flask import request
# import os
# import sys
# import time
# # os.add_dll_directory(os.getcwd())
# os.add_dll_directory(r'C:\Program Files (x86)\VideoLAN\VLC')
# player = YouTubePlayer()
# playlist = []
# slack = Slack(
# url='https://hooks.slack.com/services/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')
# # Bind the Events API route to your existing Flask app by passing the server
# # instance as the last param, or with `server=app`.
# # Our app's Slack Event Adapter for receiving actions via the Events API
# SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"]
# # Create a SlackClient for your bot to use for Web API requests
# SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
# SLACK_SIGNING_SECRET = os.environ["SLACK_SIGNING_SECRET"]
# slack_client = WebClient(SLACK_BOT_TOKEN)
# # ------------------------------------------------------------------------------
# # ==============================================================================
# # Helper to send a message asking how the workshop is going, with a select menu
# def send_survey(user, channel, text):
# # More info: https://api.slack.com/docs/message-menus
# # Send an in-channel reply to the user
# print('>>send_survey>>', channel)
# slack_client.api_call(
# api_method='chat.postMessage',
# json={'channel': channel,
# 'text': text}
# )
# # ------------------------------------------------------------------------------
# def create_app():
# app = Flask(__name__)
# slack_events_adapter = SlackEventAdapter(
# SLACK_SIGNING_SECRET, "/slack/events", app)
# # SLACK_VERIFICATION_TOKEN, "/slack/events", app)
# @app.route("/slack/events", methods=['POST'])
# def slack_event():
# body = request.get_json()
# return body['challenge']
# # ==============================================================================
# # Event listener for app_mention events
# # app_mention events allow you to subscribe to only the messages directed
# # at your app's bot user
# @slack_events_adapter.on("app_mention")
# def handle_app_mention(event_data):
# message = event_data["event"]
# # If the incoming message contains "hi", then respond with a "Hello" message
# if message.get("subtype") is None:
# # If the incoming message contains "hi", then respond with
# # a "Hello" message
# if "hi" in message.get('text'):
# res_message = "Hi <@{}>! How do you feel today?".format(message["user"])
# send_survey(message["user"], message["channel"], res_message)
# else:
# res_message = "Pardon, I don't understand you."
# send_survey(message["user"], message["channel"], res_message)
# # ------------------------------------------------------------------------------
# @app.route("/")
# def hello():
# return "Hello, World!"
# @app.route("/add", methods=['GET', 'POST'])
# def add():
# if request.method == 'POST':
# # print('>>>', request.form['url'])
# # req_data = request.args.get('url')
# # req_data = request.args['url']
# req_data = request.get_json()
# print('>>>', req_data)
# if 'url' in req_data:
# url = req_data['url']
# yt_vid = YouTubeVideo.get_instance(url)
# playlist.append(yt_vid)
# player.enqueue(yt_vid)
# return "<h1 style='color:blue'>POST: add!</h1>"
# else:
# return "<h1 style='color:red'>GET: add!</h1>"
# @app.route("/play", methods=['POST'])
# def play():
# player.play()
# return "<h1 style='color:blue'>Play!</h1>"
# @app.route("/pause", methods=['POST'])
# def pause():
# player.pause()
# return "<h1 style='color:red'>Pause!</h1>"
# @app.route("/next", methods=['POST'])
# def next():
# inx_begin = player.get_nowplaying_idx();
# player.next()
# inx_after = player.get_nowplaying_idx();
# if inx_begin != inx_after:
# return "<h1 style='color:blue'>Next!</h1>"
# else:
# return "<h1 style='color:Orange'>End of list!</h1>"
# return app
# def __init__(self, player, playlist):
# self.player = player
# self.playlist = playlist
# if len(self.playlist) > 0:
# for v in playlist:
# self.player.enqueue(v.stream_url)
# return
# # if __name__ == "__main__":
# # app = create_app()
# # app.run('0.0.0.0')
| [] | [] | [
"SLACK_VERIFICATION_TOKEN",
"SLACK_SIGNING_SECRET",
"SLACK_BOT_TOKEN"
] | [] | ["SLACK_VERIFICATION_TOKEN", "SLACK_SIGNING_SECRET", "SLACK_BOT_TOKEN"] | python | 3 | 0 | |
cpp/setup.py | # -*- coding: utf-8 -*-
import os
import pathlib
import sys
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cfg = "Debug" if self.debug else "Release"
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
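# Base CMake configure arguments: pybind11/Python discovery, the extension output directory,
# build type and package version are all derived from the running interpreter and this build.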
cmake_args = [
"-DPYBIND11_PYTHON_VERSION={}.{}".format(sys.version_info.major, sys.version_info.minor),
"-DPython_ROOT_DIR={}".format(str(pathlib.Path(sys.executable).parent.parent)),
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
"-DPYTHON_EXECUTABLE={}".format(sys.executable),
"-DCAIRL_GYM_VERSION_INFO={}".format(self.distribution.get_version()),
"-DCMAKE_BUILD_TYPE={}".format(cfg), # not used on MSVC, but no harm
"-DIS_PYBIND_BUILD={}".format("1"),
"-DENABLE_TESTS=NO"
]
build_args = []
if self.compiler.compiler_type != "msvc":
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
cmake_args += ["-GNinja"]
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
build_args += ["--config", cfg]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += ["-j{}".format(self.parallel)]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp
)
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
name="cairl-gym",
version="1.0.0",
author="Per-Arne Andersen",
author_email="[email protected]",
url="https://github.com/cair/cairl",
description="Python bindings for cairl",
long_description="",
setup_requires=["pybind11", "numpy"],
ext_modules=[CMakeExtension("cairlpp.gym")],
extras_require={"test": "pytest"},
# Currently, build_ext only provides an optional "highest supported C++
# level" feature, but in the future it may provide more features.
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
) | [] | [] | [
"CMAKE_GENERATOR"
] | [] | ["CMAKE_GENERATOR"] | python | 1 | 0 | |
web3/web3.go | package web3
import (
"auctionBidder/utils"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"math/big"
"os"
"strings"
)
type Web3 struct {
Rpc *EthRPC
privateKeyMap map[string]string // address -> privateKey
}
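// NewWeb3 returns a Web3 instance backed by the given Ethereum JSON-RPC node URL, with an empty private key store.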
func NewWeb3(ethereumNodeUrl string) *Web3 {
rpc := NewEthRPC(ethereumNodeUrl)
return &Web3{rpc, map[string]string{}}
}
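// AddPrivateKey registers a hex-encoded private key and returns the address derived from its public key.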
func (w *Web3) AddPrivateKey(privateKey string) (newAddress string, err error) {
pk, err := utils.NewPrivateKeyByHex(privateKey)
if err != nil {
return
}
newAddress = utils.PubKey2Address(pk.PublicKey)
w.privateKeyMap[strings.ToLower(newAddress)] = strings.ToLower(privateKey)
return
}
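// NewBlockChannel returns a channel that emits the block number each time the node reports a new block.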
func (w *Web3) NewBlockChannel() chan int64 {
c := make(chan int64)
go func() {
blockNum := 0
for {
newBlockNum, err := w.Rpc.EthBlockNumber()
if err == nil {
if newBlockNum > blockNum {
c <- int64(newBlockNum)
blockNum = newBlockNum
}
}
}
}()
return c
}
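// SendTxParams carries the sender address, gas limit, gas price and nonce used when sending a transaction.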
type SendTxParams struct {
FromAddress string
GasLimit *big.Int
GasPrice *big.Int
Nonce uint64
}
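// Contract binds a deployed contract's ABI and address to the Web3 instance that created it.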
type Contract struct {
web3 *Web3
abi *abi.ABI
address *common.Address
}
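// NewContract parses the ABI JSON string and associates it with the given contract address.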
func (w *Web3) NewContract(abiStr string, address string) (contract *Contract, err error) {
abi, err := abi.JSON(strings.NewReader(abiStr))
if err != nil {
return
}
commonAddress := common.HexToAddress(address)
contract = &Contract{
w, &abi, &commonAddress,
}
return
}
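// Call performs a read-only eth_call against the contract at the latest block and returns the raw response.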
func (c *Contract) Call(functionName string, args ...interface{}) (resp string, err error) {
var dataByte []byte
if args != nil {
dataByte, err = c.abi.Pack(functionName, args...)
} else {
dataByte = c.abi.Methods[functionName].ID()
}
if err != nil {
return
}
return c.web3.Rpc.EthCall(T{
To: c.address.String(),
From: "0x0000000000000000000000000000000000000000",
Data: fmt.Sprintf("0x%x", dataByte)},
"latest",
)
}
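// Send packs the call data, signs a transaction from params.FromAddress using the stored key and the
// CHAIN_ID environment variable, and broadcasts it with eth_sendRawTransaction.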
func (c *Contract) Send(params *SendTxParams, amount *big.Int, functionName string, args ...interface{}) (resp string, err error) {
if _, ok := c.web3.privateKeyMap[strings.ToLower(params.FromAddress)]; !ok {
err = utils.AddressNotExist
return
}
data, err := c.abi.Pack(functionName, args...)
if err != nil {
return
}
tx := types.NewTransaction(
params.Nonce,
*c.address,
amount,
params.GasLimit.Uint64(),
params.GasPrice,
data,
)
rawData, _ := utils.SignTx(c.web3.privateKeyMap[strings.ToLower(params.FromAddress)], os.Getenv("CHAIN_ID"), tx)
return c.web3.Rpc.EthSendRawTransaction(rawData)
}
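// GetGasPriceGwei fetches the current "fast" gas price from ethgasstation.info, defaulting to 30 gwei
// on error and capping the result at 300 gwei.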
func GetGasPriceGwei() (gasPriceInGwei int64) {
resp, err := utils.Get("https://ethgasstation.info/json/ethgasAPI.json", "", utils.EmptyKeyPairList, utils.EmptyKeyPairList)
if err != nil {
return 30 // default 30gwei
}
var dataContainer struct {
Fast float64 `json:"fast"`
Fastest float64 `json:"fastest"`
SafeLow float64 `json:"safeLow"`
Average float64 `json:"average"`
}
json.Unmarshal([]byte(resp), &dataContainer)
gasPriceInGwei = int64(dataContainer.Fast / 10)
if gasPriceInGwei > 300 {
gasPriceInGwei = 300
}
return
}
| [
"\"CHAIN_ID\""
] | [] | [
"CHAIN_ID"
] | [] | ["CHAIN_ID"] | go | 1 | 0 | |
vendor/github.com/lrstanley/girc/client.go | // Copyright (c) Liam Stanley <[email protected]>. All rights reserved. Use
// of this source code is governed by the MIT license that can be found in
// the LICENSE file.
package girc
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// Client contains all of the information necessary to run a single IRC
// client.
type Client struct {
// Config represents the configuration. Please take extra caution in that
// entries in this are not edited while the client is connected, to prevent
// data races. This is NOT concurrent safe to update.
Config Config
// rx is a buffer of events waiting to be processed.
rx chan *Event
// tx is a buffer of events waiting to be sent.
tx chan *Event
// state represents the throw-away state for the irc session.
state *state
// initTime represents the creation time of the client.
initTime time.Time
// Handlers is a handler which manages internal and external handlers.
Handlers *Caller
// CTCP is a handler which manages internal and external CTCP handlers.
CTCP *CTCP
// Cmd contains various helper methods to interact with the server.
Cmd *Commands
// mu is the mux used for connections/disconnections from the server,
// so multiple threads aren't trying to connect at the same time, and
// vice versa.
mu sync.RWMutex
// stop is used to communicate with Connect(), letting it know that the
// client wishes to cancel/close.
stop context.CancelFunc
// conn is a net.Conn reference to the IRC server. If this is nil, it is
// safe to assume that we're not connected. If this is not nil, this
// means we're either connected, connecting, or cleaning up. This should
// be guarded with Client.mu.
conn *ircConn
// debug is used if a writer is supplied for Client.Config.Debugger.
debug *log.Logger
}
// Config contains configuration options for an IRC client
type Config struct {
// Server is a host/ip of the server you want to connect to. This only
// has an effect during the dial process
Server string
// ServerPass is the server password used to authenticate. This only has
// an effect during the dial process.
ServerPass string
// Port is the port that will be used during server connection. This only
// has an effect during the dial process.
Port int
// Nick is an rfc-valid nickname used during connection. This only has an
// effect during the dial process.
Nick string
// User is the username/ident to use on connect. Ignored if an identd
// server is used. This only has an effect during the dial process.
User string
// Name is the "realname" that's used during connection. This only has an
// effect during the dial process.
Name string
// SASL contains the necessary authentication data to authenticate
// with SASL. See the documentation for SASLMech for what is currently
// supported. Capability tracking must be enabled for this to work, as
// this requires IRCv3 CAP handling.
SASL SASLMech
// WebIRC allows forwarding source user hostname/ip information to the server
// (if supported by the server) to ensure the source machine doesn't show as
// the source. See the WebIRC type for more information.
WebIRC WebIRC
// Bind is used to bind to a specific host or ip during the dial process
// when connecting to the server. This can be a hostname, however it must
// resolve to an IPv4/IPv6 address bindable on your system. Otherwise,
// you can simply use an IPv4/IPv6 address directly. This only has an
// effect during the dial process and will not work with DialerConnect().
Bind string
// SSL allows dialing via TLS. See TLSConfig to set your own TLS
// configuration (e.g. to not force hostname checking). This only has an
// effect during the dial process.
SSL bool
// DisableSTS disables the use of automatic STS connection upgrades
// when the server supports STS. STS can also be disabled using the environment
// variable "GIRC_DISABLE_STS=true". As many clients may not propagate options
// like this back to the user, this allows to directly disable such automatic
// functionality.
DisableSTS bool
// DisableSTSFallback disables the "fallback" to a non-tls connection if the
// strict transport policy expires and the first attempt to reconnect back to
// the tls version fails.
DisableSTSFallback bool
// TLSConfig is an optional user-supplied tls configuration, used during
// socket creation to the server. SSL must be enabled for this to be used.
// This only has an effect during the dial process.
TLSConfig *tls.Config
// AllowFlood allows the client to bypass the rate limit of outbound
// messages.
AllowFlood bool
// GlobalFormat enables passing through all events which have trailing
// text through the color Fmt() function, so you don't have to wrap
// every response in the Fmt() method.
//
// Note that this only actually applies to PRIVMSG, NOTICE and TOPIC
// events, to ensure it doesn't clobber unwanted events.
GlobalFormat bool
// Debug is an optional, user supplied location to log the raw lines
// sent from the server, or other useful debug logs. Defaults to
// ioutil.Discard. For quick debugging, this could be set to os.Stdout.
Debug io.Writer
// Out is used to write out a prettified version of incoming events. For
// example, channel JOIN/PART, PRIVMSG/NOTICE, KICK, etc. Useful to get
// a brief output of the activity of the client. If you are looking to
// log raw messages, look at a handler and girc.ALLEVENTS and the relevant
// Event.Bytes() or Event.String() methods.
Out io.Writer
// RecoverFunc is called when a handler throws a panic. If RecoverFunc is
// set, the panic will be considered recovered, otherwise the client will
// panic. Set this to DefaultRecoverHandler if you don't want the client
// to panic, however you don't want to handle the panic yourself.
// DefaultRecoverHandler will log the panic to Debug or os.Stdout if
// Debug is unset.
RecoverFunc func(c *Client, e *HandlerError)
// SupportedCaps are the IRCv3 capabilities you would like the client to
// support on top of the ones which the client already supports (see
// cap.go for which ones the client enables by default). Only use this
// if you have not called DisableTracking(). The keys value gets passed
// to the server if supported.
SupportedCaps map[string][]string
// Version is the application version information that will be used in
// response to a CTCP VERSION, if default CTCP replies have not been
// overwritten or a VERSION handler was already supplied.
Version string
// PingDelay is the frequency between when the client sends a keep-alive
// PING to the server, and awaits a response (and times out if the server
// doesn't respond in time). This should be between 20-600 seconds. See
// Client.Latency() if you want to determine the delay between the server
// and the client. If this is set to -1, the client will not attempt to
// send client -> server PING requests.
PingDelay time.Duration
// disableTracking disables all channel and user-level tracking. Useful
// for highly embedded scripts with single purposes. This has an exported
// method which enables this and ensures proper cleanup, see
// Client.DisableTracking().
disableTracking bool
// HandleNickCollide when set, allows the client to handle nick collisions
// in a custom way. If unset, the client will attempt to append an
// underscore to the end of the nickname, in order to bypass using
// an invalid nickname. For example, if "test" is already in use, or is
// blocked by the network/a service, the client will try and use "test_",
// then it will attempt "test__", "test___", and so on.
//
// If HandleNickCollide returns an empty string, the client will not
// attempt to fix nickname collisions, and you must handle this yourself.
HandleNickCollide func(oldNick string) (newNick string)
}
// WebIRC is useful when a user connects through an indirect method, such web
// clients, the indirect client sends its own IP address instead of sending the
// user's IP address unless WebIRC is implemented by both the client and the
// server.
//
// Client expectations:
// - Perform any proxy resolution.
// - Check the reverse DNS and forward DNS match.
// - Check the IP against suitable access controls (ipaccess, dnsbl, etc).
//
// More information:
// - https://ircv3.net/specs/extensions/webirc.html
// - https://kiwiirc.com/docs/webirc
type WebIRC struct {
// Password that authenticates the WEBIRC command from this client.
Password string
// Gateway or client type requesting spoof (cgiirc defaults to cgiirc, as an
// example).
Gateway string
// Hostname of user.
Hostname string
// Address either in IPv4 dotted quad notation (e.g. 192.0.0.2) or IPv6
// notation (e.g. 1234:5678:9abc::def). IPv4-in-IPv6 addresses
// (e.g. ::ffff:192.0.0.2) should not be sent.
Address string
}
// Params returns the arguments for the WEBIRC command that can be passed to the
// server.
func (w WebIRC) Params() []string {
return []string{w.Password, w.Gateway, w.Hostname, w.Address}
}
// ErrInvalidConfig is returned when the configuration passed to the client
// is invalid.
type ErrInvalidConfig struct {
Conf Config // Conf is the configuration that was not valid.
err error
}
func (e ErrInvalidConfig) Error() string { return "invalid configuration: " + e.err.Error() }
// isValid checks some basic settings to ensure the config is valid.
func (conf *Config) isValid() error {
if conf.Server == "" {
return &ErrInvalidConfig{Conf: *conf, err: errors.New("empty server")}
}
// Default port to 6667 (the standard IRC port).
if conf.Port == 0 {
conf.Port = 6667
}
if conf.Port < 1 || conf.Port > 65535 {
return &ErrInvalidConfig{Conf: *conf, err: errors.New("port outside valid range (1-65535)")}
}
if !IsValidNick(conf.Nick) {
return &ErrInvalidConfig{Conf: *conf, err: errors.New("bad nickname specified")}
}
if !IsValidUser(conf.User) {
return &ErrInvalidConfig{Conf: *conf, err: errors.New("bad user/ident specified")}
}
return nil
}
// ErrNotConnected is returned if a method is used when the client isn't
// connected.
var ErrNotConnected = errors.New("client is not connected to server")
// New creates a new IRC client with the specified server, name and config.
func New(config Config) *Client {
c := &Client{
Config: config,
rx: make(chan *Event, 25),
tx: make(chan *Event, 25),
CTCP: newCTCP(),
initTime: time.Now(),
}
c.Cmd = &Commands{c: c}
if c.Config.PingDelay >= 0 && c.Config.PingDelay < (20*time.Second) {
c.Config.PingDelay = 20 * time.Second
} else if c.Config.PingDelay > (600 * time.Second) {
c.Config.PingDelay = 600 * time.Second
}
envDebug, _ := strconv.ParseBool(os.Getenv("GIRC_DEBUG"))
if c.Config.Debug == nil {
if envDebug {
c.debug = log.New(os.Stderr, "debug:", log.Ltime|log.Lshortfile)
} else {
c.debug = log.New(ioutil.Discard, "", 0)
}
} else {
if envDebug {
if c.Config.Debug != os.Stdout && c.Config.Debug != os.Stderr {
c.Config.Debug = io.MultiWriter(os.Stderr, c.Config.Debug)
}
}
c.debug = log.New(c.Config.Debug, "debug:", log.Ltime|log.Lshortfile)
c.debug.Print("initializing debugging")
}
envDisableSTS, _ := strconv.ParseBool(os.Getenv("GIRC_DISABLE_STS"))
if envDisableSTS {
c.Config.DisableSTS = envDisableSTS
}
// Setup the caller.
c.Handlers = newCaller(c.debug)
// Give ourselves a new state.
c.state = &state{}
c.state.reset(true)
// Register builtin handlers.
c.registerBuiltins()
// Register default CTCP responses.
c.CTCP.addDefaultHandlers()
return c
}
// String returns a brief description of the current client state.
func (c *Client) String() string {
connected := c.IsConnected()
return fmt.Sprintf(
"<Client init:%q handlers:%d connected:%t>", c.initTime.String(), c.Handlers.Len(), connected,
)
}
// TLSConnectionState returns the TLS connection state from tls.Conn{}, which
// is useful to return needed TLS fingerprint info, certificates, verify cert
// expiration dates, etc. Will only return an error if the underlying
// connection wasn't established using TLS (see ErrConnNotTLS), or if the
// client isn't connected.
func (c *Client) TLSConnectionState() (*tls.ConnectionState, error) {
c.mu.RLock()
defer c.mu.RUnlock()
if c.conn == nil {
return nil, ErrNotConnected
}
c.conn.mu.RLock()
defer c.conn.mu.RUnlock()
if !c.conn.connected {
return nil, ErrNotConnected
}
if tlsConn, ok := c.conn.sock.(*tls.Conn); ok {
cs := tlsConn.ConnectionState()
return &cs, nil
}
return nil, ErrConnNotTLS
}
// ErrConnNotTLS is returned when Client.TLSConnectionState() is called, and
// the connection to the server wasn't made with TLS.
var ErrConnNotTLS = errors.New("underlying connection is not tls")
// Close closes the network connection to the server, and sends a CLOSED
// event. This should cause Connect() to return with nil. This should be
// safe to call multiple times. See Connect()'s documentation on how
// handlers and goroutines are handled when disconnected from the server.
func (c *Client) Close() {
c.mu.RLock()
if c.stop != nil {
c.debug.Print("requesting client to stop")
c.stop()
}
c.mu.RUnlock()
}
// Quit sends a QUIT message to the server with a given reason to close the
// connection. Underlying this event being sent, Client.Close() is called as well.
// This is different than just calling Client.Close() in that it provides a reason
// as to why the connection was closed (for bots to tell users the bot is restarting,
// or shutting down, etc).
//
// NOTE: servers may delay showing of QUIT reasons, until you've been connected to
// the server for a certain period of time (e.g. 5 minutes). Keep this in mind.
func (c *Client) Quit(reason string) {
c.Send(&Event{Command: QUIT, Params: []string{reason}})
}
// ErrEvent is an error returned when the server (or library) sends an ERROR
// message response. The string returned contains the trailing text from the
// message.
type ErrEvent struct {
Event *Event
}
func (e *ErrEvent) Error() string {
if e.Event == nil {
return "unknown error occurred"
}
return e.Event.Last()
}
func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGroup) {
c.debug.Print("starting execLoop")
defer c.debug.Print("closing execLoop")
var event *Event
for {
select {
case <-ctx.Done():
// We've been told to exit, however we shouldn't bail on the
// current events in the queue that should be processed, as one
// may want to handle an ERROR, QUIT, etc.
c.debug.Printf("received signal to close, flushing %d events and executing", len(c.rx))
for {
select {
case event = <-c.rx:
c.RunHandlers(event)
default:
goto done
}
}
done:
wg.Done()
return
case event = <-c.rx:
if event != nil && event.Command == ERROR {
// Handles incoming ERROR responses. These are only ever sent
// by the server (with the exception that this library may use
// them as a lower level way of signalling to disconnect due
// to some other client-chosen error), and should always be
// followed up by the server disconnecting the client. If for
// some reason the server doesn't disconnect the client, or
// if this library is the source of the error, this should
// signal back up to the main connect loop, to disconnect.
errs <- &ErrEvent{Event: event}
// Make sure to not actually exit, so we can let any handlers
// actually handle the ERROR event.
}
c.RunHandlers(event)
}
}
}
// DisableTracking disables all channel/user-level/CAP tracking, and clears
// all internal handlers. Useful for highly embedded scripts with single
// purposes. This cannot be un-done on a client.
func (c *Client) DisableTracking() {
c.debug.Print("disabling tracking")
c.Config.disableTracking = true
c.Handlers.clearInternal()
c.state.Lock()
c.state.channels = nil
c.state.Unlock()
c.state.notify(c, UPDATE_STATE)
c.registerBuiltins()
}
// Server returns the string representation of host+port pair for the connection.
func (c *Client) Server() string {
c.state.Lock()
defer c.state.Unlock()
return c.server()
}
// server returns the string representation of host+port pair for net.Conn, and
// takes into consideration STS. Must lock state mu first!
func (c *Client) server() string {
if c.state.sts.enabled() {
return net.JoinHostPort(c.Config.Server, strconv.Itoa(c.state.sts.upgradePort))
}
return net.JoinHostPort(c.Config.Server, strconv.Itoa(c.Config.Port))
}
// Lifetime returns the amount of time that has passed since the client was
// created.
func (c *Client) Lifetime() time.Duration {
return time.Since(c.initTime)
}
// Uptime is the time at which the client successfully connected to the
// server.
func (c *Client) Uptime() (up *time.Time, err error) {
if !c.IsConnected() {
return nil, ErrNotConnected
}
c.mu.RLock()
c.conn.mu.RLock()
up = c.conn.connTime
c.conn.mu.RUnlock()
c.mu.RUnlock()
return up, nil
}
// ConnSince is the duration that has past since the client successfully
// connected to the server.
func (c *Client) ConnSince() (since *time.Duration, err error) {
if !c.IsConnected() {
return nil, ErrNotConnected
}
c.mu.RLock()
c.conn.mu.RLock()
timeSince := time.Since(*c.conn.connTime)
c.conn.mu.RUnlock()
c.mu.RUnlock()
return &timeSince, nil
}
// IsConnected returns true if the client is connected to the server.
func (c *Client) IsConnected() bool {
c.mu.RLock()
if c.conn == nil {
c.mu.RUnlock()
return false
}
c.conn.mu.RLock()
connected := c.conn.connected
c.conn.mu.RUnlock()
c.mu.RUnlock()
return connected
}
// GetNick returns the current nickname of the active connection. Panics if
// tracking is disabled.
func (c *Client) GetNick() string {
c.panicIfNotTracking()
c.state.RLock()
defer c.state.RUnlock()
if c.state.nick == "" {
return c.Config.Nick
}
return c.state.nick
}
// GetID returns an RFC1459 compliant version of the current nickname. Panics
// if tracking is disabled.
func (c *Client) GetID() string {
return ToRFC1459(c.GetNick())
}
// GetIdent returns the current ident of the active connection. Panics if
// tracking is disabled. May be empty, as this is obtained from when we join
// a channel, as there is no other more efficient method to return this info.
func (c *Client) GetIdent() string {
c.panicIfNotTracking()
c.state.RLock()
defer c.state.RUnlock()
if c.state.ident == "" {
return c.Config.User
}
return c.state.ident
}
// GetHost returns the current host of the active connection. Panics if
// tracking is disabled. May be empty, as this is obtained from when we join
// a channel, as there is no other more efficient method to return this info.
func (c *Client) GetHost() (host string) {
c.panicIfNotTracking()
c.state.RLock()
host = c.state.host
c.state.RUnlock()
return host
}
// ChannelList returns the (sorted) active list of channel names that the client
// is in. Panics if tracking is disabled.
func (c *Client) ChannelList() []string {
c.panicIfNotTracking()
c.state.RLock()
channels := make([]string, 0, len(c.state.channels))
for channel := range c.state.channels {
channels = append(channels, c.state.channels[channel].Name)
}
c.state.RUnlock()
sort.Strings(channels)
return channels
}
// Channels returns the (sorted) active channels that the client is in. Panics
// if tracking is disabled.
func (c *Client) Channels() []*Channel {
c.panicIfNotTracking()
c.state.RLock()
channels := make([]*Channel, 0, len(c.state.channels))
for channel := range c.state.channels {
channels = append(channels, c.state.channels[channel].Copy())
}
c.state.RUnlock()
sort.Slice(channels, func(i, j int) bool {
return channels[i].Name < channels[j].Name
})
return channels
}
// UserList returns the (sorted) active list of nicknames that the client is
// tracking across all channels. Panics if tracking is disabled.
func (c *Client) UserList() []string {
c.panicIfNotTracking()
c.state.RLock()
users := make([]string, 0, len(c.state.users))
for user := range c.state.users {
users = append(users, c.state.users[user].Nick)
}
c.state.RUnlock()
sort.Strings(users)
return users
}
// Users returns the (sorted) active users that the client is tracking across
// all channels. Panics if tracking is disabled.
func (c *Client) Users() []*User {
c.panicIfNotTracking()
c.state.RLock()
users := make([]*User, 0, len(c.state.users))
for user := range c.state.users {
users = append(users, c.state.users[user].Copy())
}
c.state.RUnlock()
sort.Slice(users, func(i, j int) bool {
return users[i].Nick < users[j].Nick
})
return users
}
// LookupChannel looks up a given channel in state. If the channel doesn't
// exist, nil is returned. Panics if tracking is disabled.
func (c *Client) LookupChannel(name string) (channel *Channel) {
c.panicIfNotTracking()
if name == "" {
return nil
}
c.state.RLock()
channel = c.state.lookupChannel(name).Copy()
c.state.RUnlock()
return channel
}
// LookupUser looks up a given user in state. If the user doesn't exist, nil
// is returned. Panics if tracking is disabled.
func (c *Client) LookupUser(nick string) (user *User) {
c.panicIfNotTracking()
if nick == "" {
return nil
}
c.state.RLock()
user = c.state.lookupUser(nick).Copy()
c.state.RUnlock()
return user
}
// IsInChannel returns true if the client is in channel. Panics if tracking
// is disabled.
func (c *Client) IsInChannel(channel string) (in bool) {
c.panicIfNotTracking()
c.state.RLock()
_, in = c.state.channels[ToRFC1459(channel)]
c.state.RUnlock()
return in
}
// GetServerOption retrieves a server capability setting that was retrieved
// during client connection. This is also known as ISUPPORT (or RPL_PROTOCTL).
// Will panic if used when tracking has been disabled. Examples of usage:
//
// nickLen, success := GetServerOption("MAXNICKLEN")
//
func (c *Client) GetServerOption(key string) (result string, ok bool) {
c.panicIfNotTracking()
c.state.RLock()
result, ok = c.state.serverOptions[key]
c.state.RUnlock()
return result, ok
}
// NetworkName returns the network identifier. E.g. "EsperNet", "ByteIRC".
// May be empty if the server does not support RPL_ISUPPORT (or RPL_PROTOCTL).
// Will panic if used when tracking has been disabled.
func (c *Client) NetworkName() (name string) {
c.panicIfNotTracking()
name, _ = c.GetServerOption("NETWORK")
return name
}
// ServerVersion returns the server software version, if the server has
// supplied this information during connection. May be empty if the server
// does not support RPL_MYINFO. Will panic if used when tracking has been
// disabled.
func (c *Client) ServerVersion() (version string) {
c.panicIfNotTracking()
version, _ = c.GetServerOption("VERSION")
return version
}
// ServerMOTD returns the servers message of the day, if the server has sent
// it upon connect. Will panic if used when tracking has been disabled.
func (c *Client) ServerMOTD() (motd string) {
c.panicIfNotTracking()
c.state.RLock()
motd = c.state.motd
c.state.RUnlock()
return motd
}
// Latency is the latency between the server and the client. This is measured
// by determining the difference in time between when we ping the server, and
// when we receive a pong.
func (c *Client) Latency() (delta time.Duration) {
c.mu.RLock()
c.conn.mu.RLock()
delta = c.conn.lastPong.Sub(c.conn.lastPing)
c.conn.mu.RUnlock()
c.mu.RUnlock()
if delta < 0 {
return 0
}
return delta
}
// HasCapability checks if the client connection has the given capability. If
// you want the full list of capabilities, listen for the girc.CAP_ACK event.
// Will panic if used when tracking has been disabled.
func (c *Client) HasCapability(name string) (has bool) {
c.panicIfNotTracking()
if !c.IsConnected() {
return false
}
name = strings.ToLower(name)
c.state.RLock()
for key := range c.state.enabledCap {
key = strings.ToLower(key)
if key == name {
has = true
break
}
}
c.state.RUnlock()
return has
}
// panicIfNotTracking will throw a panic when it's called, and tracking is
// disabled. Adds useful info like what function specifically, and where it
// was called from.
func (c *Client) panicIfNotTracking() {
if !c.Config.disableTracking {
return
}
pc, _, _, _ := runtime.Caller(1)
fn := runtime.FuncForPC(pc)
_, file, line, _ := runtime.Caller(2)
panic(fmt.Sprintf("%s used when tracking is disabled (caller %s:%d)", fn.Name(), file, line))
}
func (c *Client) debugLogEvent(e *Event, dropped bool) {
var prefix string
if dropped {
prefix = "dropping event (disconnected):"
} else {
prefix = ">"
}
if e.Sensitive {
c.debug.Printf("%s %s ***redacted***", prefix, e.Command)
} else {
c.debug.Print(prefix, " ", StripRaw(e.String()))
}
if c.Config.Out != nil {
if pretty, ok := e.Pretty(); ok {
fmt.Fprintln(c.Config.Out, StripRaw(pretty))
}
}
}
| [
"\"GIRC_DEBUG\"",
"\"GIRC_DISABLE_STS\""
] | [] | [
"GIRC_DISABLE_STS",
"GIRC_DEBUG"
] | [] | ["GIRC_DISABLE_STS", "GIRC_DEBUG"] | go | 2 | 0 | |
env/lib/python3.6/site-packages/paramiko/config.py | # Copyright (C) 2006-2007 Robey Pointer <[email protected]>
# Copyright (C) 2012 Olle Lundberg <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Configuration file (aka ``ssh_config``) support.
"""
import fnmatch
import os
import re
import shlex
import socket
SSH_PORT = 22
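# Typical usage (sketch): build an SSHConfig, feed it a config file, then query per-host options,
# e.g. cfg = SSHConfig(); cfg.parse(open(os.path.expanduser('~/.ssh/config'))); cfg.lookup('example.com').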
class SSHConfig (object):
"""
Representation of config information as stored in the format used by
OpenSSH. Queries can be made via `lookup`. The format is described in
OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
convenience to posix users (since the OpenSSH format is a de-facto
standard on posix) but should work fine on Windows too.
.. versionadded:: 1.6
"""
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
def __init__(self):
"""
Create a new OpenSSH config object.
"""
self._config = []
def parse(self, file_obj):
"""
Read an OpenSSH config from the given file object.
:param file_obj: a file-like object to read the config file from
"""
host = {"host": ['*'], "config": {}}
for line in file_obj:
# Strip any leading or trailing whitespace from the line.
# Refer to https://github.com/paramiko/paramiko/issues/499
line = line.strip()
if not line or line.startswith('#'):
continue
match = re.match(self.SETTINGS_REGEX, line)
if not match:
raise Exception("Unparsable line {}".format(line))
key = match.group(1).lower()
value = match.group(2)
if key == 'host':
self._config.append(host)
host = {
'host': self._get_hosts(value),
'config': {}
}
elif key == 'proxycommand' and value.lower() == 'none':
# Store 'none' as None; prior to 3.x, it will get stripped out
# at the end (for compatibility with issue #415). After 3.x, it
# will simply not get stripped, leaving a nice explicit marker.
host['config'][key] = None
else:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
# identityfile, localforward, remoteforward keys are special
# cases, since they are allowed to be specified multiple times
# and they should be tried in order of specification.
if key in ['identityfile', 'localforward', 'remoteforward']:
if key in host['config']:
host['config'][key].append(value)
else:
host['config'][key] = [value]
elif key not in host['config']:
host['config'][key] = value
self._config.append(host)
def lookup(self, hostname):
"""
Return a dict of config options for a given hostname.
The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
For each parameter, the first obtained value will be used. The
configuration files contain sections separated by ``Host``
specifications, and that section is only applied for hosts that match
one of the patterns given in the specification.
Since the first obtained value for each parameter is used, more host-
specific declarations should be given near the beginning of the file,
and general defaults at the end.
The keys in the returned dict are all normalized to lowercase (look for
``"port"``, not ``"Port"``). The values are processed according to the
rules for substitution variable expansion in ``ssh_config``.
:param str hostname: the hostname to lookup
"""
matches = [
config for config in self._config
if self._allowed(config['host'], hostname)
]
ret = {}
for match in matches:
for key, value in match['config'].items():
if key not in ret:
# Create a copy of the original value,
# else it will reference the original list
# in self._config and update that value too
# when the extend() is being called.
ret[key] = value[:] if value is not None else value
elif key == 'identityfile':
ret[key].extend(value)
ret = self._expand_variables(ret, hostname)
# TODO: remove in 3.x re #670
if 'proxycommand' in ret and ret['proxycommand'] is None:
del ret['proxycommand']
return ret
def get_hostnames(self):
"""
Return the set of literal hostnames defined in the SSH config (both
explicit hostnames and wildcard entries).
"""
hosts = set()
for entry in self._config:
hosts.update(entry['host'])
return hosts
def _allowed(self, hosts, hostname):
match = False
for host in hosts:
if host.startswith('!') and fnmatch.fnmatch(hostname, host[1:]):
return False
elif fnmatch.fnmatch(hostname, host):
match = True
return match
def _expand_variables(self, config, hostname):
"""
Return a dict of config options with expanded substitutions
for a given hostname.
Please refer to man ``ssh_config`` for the parameters that
are replaced.
:param dict config: the config for the hostname
:param str hostname: the hostname that the config belongs to
"""
if 'hostname' in config:
config['hostname'] = config['hostname'].replace('%h', hostname)
else:
config['hostname'] = hostname
if 'port' in config:
port = config['port']
else:
port = SSH_PORT
user = os.getenv('USER')
if 'user' in config:
remoteuser = config['user']
else:
remoteuser = user
host = socket.gethostname().split('.')[0]
fqdn = LazyFqdn(config, host)
homedir = os.path.expanduser('~')
replacements = {'controlpath':
[
('%h', config['hostname']),
('%l', fqdn),
('%L', host),
('%n', hostname),
('%p', port),
('%r', remoteuser),
('%u', user)
],
'identityfile':
[
('~', homedir),
('%d', homedir),
('%h', config['hostname']),
('%l', fqdn),
('%u', user),
('%r', remoteuser)
],
'proxycommand':
[
('~', homedir),
('%h', config['hostname']),
('%p', port),
('%r', remoteuser)
]
}
for k in config:
if config[k] is None:
continue
if k in replacements:
for find, replace in replacements[k]:
if isinstance(config[k], list):
for item in range(len(config[k])):
if find in config[k][item]:
config[k][item] = config[k][item].replace(
find, str(replace)
)
else:
if find in config[k]:
config[k] = config[k].replace(find, str(replace))
return config
def _get_hosts(self, host):
"""
Return a list of host_names from host value.
"""
try:
return shlex.split(host)
except ValueError:
raise Exception("Unparsable host {}".format(host))
class LazyFqdn(object):
"""
Returns the host's fqdn on request as string.
"""
def __init__(self, config, host=None):
self.fqdn = None
self.config = config
self.host = host
def __str__(self):
if self.fqdn is None:
#
# If the SSH config contains AddressFamily, use that when
# determining the local host's FQDN. Using socket.getfqdn() from
# the standard library is the most general solution, but can
# result in noticeable delays on some platforms when IPv6 is
# misconfigured or not available, as it calls getaddrinfo with no
# address family specified, so both IPv4 and IPv6 are checked.
#
# Handle specific option
fqdn = None
address_family = self.config.get('addressfamily', 'any').lower()
if address_family != 'any':
try:
family = socket.AF_INET6
if address_family == 'inet':
family = socket.AF_INET
results = socket.getaddrinfo(
self.host,
None,
family,
socket.SOCK_DGRAM,
socket.IPPROTO_IP,
socket.AI_CANONNAME
)
for res in results:
af, socktype, proto, canonname, sa = res
if canonname and '.' in canonname:
fqdn = canonname
break
# gaierror -> socket.getaddrinfo() can't resolve self.host
# (which is from socket.gethostname()). Fall back to the
# getfqdn() call below.
except socket.gaierror:
pass
# Handle 'any' / unspecified
if fqdn is None:
fqdn = socket.getfqdn()
# Cache
self.fqdn = fqdn
return self.fqdn
| [] | [] | [
"USER"
] | [] | ["USER"] | python | 1 | 0 | |
.github/workflows/trigger_circle_ci.py | import json
import os
import sys
import time
import requests
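# Fail loudly if a Circle CI API response does not carry the expected HTTP status code.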
def assert_result(result, expected_code):
if result.status_code != expected_code:
raise RuntimeError(f"{result.url}, {result.status_code}: {result.text}")
def get_output(result_text, required_keys):
output = json.loads(result_text)
if not all([v in output for v in required_keys]):
raise RuntimeError(f"Output does not contain required fields: {required_keys}\n" f"Output is: {output}")
return output
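# Trigger a new pipeline for pytorch/ignite via the Circle CI v2 API and return its id.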
def trigger_new_pipeline(data, headers):
result = requests.post(
"https://circleci.com/api/v2/project/gh/pytorch/ignite/pipeline", data=json.dumps(data), headers=headers
)
assert_result(result, 201)
output = get_output(result.text, ["id"])
return output["id"]
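# Poll the pipeline until Circle CI reports it as created; raise if it ends up errored.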
def assert_pipeline_created(pipeline_id, headers):
while True:
result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}", headers=headers)
assert_result(result, 200)
output = get_output(result.text, ["state", "errors"])
if output["state"] == "errored":
raise RuntimeError(f"Pipeline is errored: {output['errors']}")
if output["state"] == "created":
break
time.sleep(2)
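# Resolve the single workflow id attached to the pipeline, retrying until it appears.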
def get_workflow_id(pipeline_id, headers):
while True:
result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}/workflow", headers=headers)
assert_result(result, 200)
output = get_output(result.text, ["items"])
items = output["items"]
if len(items) > 1:
raise RuntimeError(f"Incorrect number of workflow ids: {len(items)} != 1\n" f"items: {items}")
if len(items) < 1:
continue
item_0 = items[0]
if "id" not in item_0:
raise RuntimeError("Workflow info does not contain 'id'\n" f"Info: {item_0}")
return item_0["id"]
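# Follow the workflow until it finishes, printing its URL and raising on any failure status.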
def assert_workflows_successful(pipeline_id, headers):
workflow_id = get_workflow_id(pipeline_id, headers)
base_url = "https://app.circleci.com/pipelines/github/pytorch/ignite"
url = None
while True:
result = requests.get(f"https://circleci.com/api/v2/workflow/{workflow_id}", headers=headers)
assert_result(result, 200)
output = get_output(result.text, ["name", "status", "pipeline_number"])
if url is None:
url = f"{base_url}/{output['pipeline_number']}/workflows/{workflow_id}"
print(f"Circle CI workflow: {url}")
if output["status"] in ["error", "failing", "canceled", "not_run", "failed"]:
raise RuntimeError(f"Workflow failed: {output['status']}\n" f"See {url}")
if output["status"] == "success":
print("\nWorkflow successful")
break
time.sleep(30)
print(".", end=" ")
if __name__ == "__main__":
print("Trigger new pipeline on Circle-CI")
if "CIRCLE_TOKEN" not in os.environ:
raise RuntimeError(
"Can not find CIRCLE_TOKEN env variable.\nPlease, export CIRCLE_TOKEN=<token> before calling this script."
"This token should be a user token and not the project token."
)
# https://discuss.circleci.com/t/triggering-pipeline-via-v2-api-fails-with-404-project-not-found/39342/2
argv = sys.argv
if len(argv) != 3:
raise RuntimeError("Usage: python trigger_circle_ci.py <true or false> <branch-name>")
should_publish_docker_images = json.loads(argv[1])
branch = argv[2]
print(f"- should_publish_docker_images: {should_publish_docker_images}")
print(f"- Branch: {branch}")
if branch.startswith("refs/pull") and branch.endswith("/merge"):
branch = branch.replace("/merge", "/head")
print(f"Replaced /merge -> /head : {branch}")
headers = {"authorization": "Basic", "content-type": "application/json", "Circle-Token": os.environ["CIRCLE_TOKEN"]}
data = {
"branch": branch,
"parameters": {
"should_build_docker_images": True,
"should_publish_docker_images": should_publish_docker_images,
},
}
unique_pipeline_id = trigger_new_pipeline(data, headers)
assert_pipeline_created(unique_pipeline_id, headers)
assert_workflows_successful(unique_pipeline_id, headers)
| [] | [] | [
"CIRCLE_TOKEN"
] | [] | ["CIRCLE_TOKEN"] | python | 1 | 0 | |
fhirclient/r4models/auditevent_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import auditevent
from .fhirdate import FHIRDate
class AuditEventTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("AuditEvent", js["resourceType"])
return auditevent.AuditEvent(js)
def testAuditEvent1(self):
inst = self.instantiate_from("audit-event-example-search.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent1(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent1(inst2)
def implAuditEvent1(self, inst):
self.assertEqual(inst.action, "E")
self.assertEqual(inst.agent[0].altId, "601847123")
self.assertEqual(inst.agent[0].name, "Grahame Grieve")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.entity[0].query, "aHR0cDovL2ZoaXItZGV2LmhlYWx0aGludGVyc2VjdGlvbnMuY29tLmF1L29wZW4vRW5jb3VudGVyP3BhcnRpY2lwYW50PTEz")
self.assertEqual(inst.entity[0].role.code, "24")
self.assertEqual(inst.entity[0].role.display, "Query")
self.assertEqual(inst.entity[0].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[0].type.code, "2")
self.assertEqual(inst.entity[0].type.display, "System Object")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example-search")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2015-08-22T23:42:24Z").date)
self.assertEqual(inst.recorded.as_json(), "2015-08-22T23:42:24Z")
self.assertEqual(inst.source.site, "Cloud")
self.assertEqual(inst.source.type[0].code, "3")
self.assertEqual(inst.source.type[0].display, "Web Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "search")
self.assertEqual(inst.subtype[0].display, "search")
self.assertEqual(inst.subtype[0].system, "http://hl7.org/fhir/restful-interaction")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "rest")
self.assertEqual(inst.type.display, "Restful Operation")
self.assertEqual(inst.type.system, "http://terminology.hl7.org/CodeSystem/audit-event-type")
def testAuditEvent2(self):
inst = self.instantiate_from("audit-event-example-logout.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent2(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent2(inst2)
def implAuditEvent2(self, inst):
self.assertEqual(inst.action, "E")
self.assertEqual(inst.agent[0].altId, "601847123")
self.assertEqual(inst.agent[0].name, "Grahame Grieve")
self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
self.assertEqual(inst.agent[0].network.type, "2")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.id, "example-logout")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:46:41Z").date)
self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:46:41Z")
self.assertEqual(inst.source.site, "Cloud")
self.assertEqual(inst.source.type[0].code, "3")
self.assertEqual(inst.source.type[0].display, "Web Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "110123")
self.assertEqual(inst.subtype[0].display, "Logout")
self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110114")
self.assertEqual(inst.type.display, "User Authentication")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent3(self):
inst = self.instantiate_from("audit-event-example-vread.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent3(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent3(inst2)
def implAuditEvent3(self, inst):
self.assertEqual(inst.action, "R")
self.assertEqual(inst.agent[0].altId, "601847123")
self.assertEqual(inst.agent[0].name, "Grahame Grieve")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.entity[0].lifecycle.code, "6")
self.assertEqual(inst.entity[0].lifecycle.display, "Access / Use")
self.assertEqual(inst.entity[0].lifecycle.system, "http://terminology.hl7.org/CodeSystem/dicom-audit-lifecycle")
self.assertEqual(inst.entity[0].type.code, "2")
self.assertEqual(inst.entity[0].type.display, "System Object")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example-rest")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:42:24Z").date)
self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:42:24Z")
self.assertEqual(inst.source.site, "Cloud")
self.assertEqual(inst.source.type[0].code, "3")
self.assertEqual(inst.source.type[0].display, "Web Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "vread")
self.assertEqual(inst.subtype[0].display, "vread")
self.assertEqual(inst.subtype[0].system, "http://hl7.org/fhir/restful-interaction")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "rest")
self.assertEqual(inst.type.display, "Restful Operation")
self.assertEqual(inst.type.system, "http://terminology.hl7.org/CodeSystem/audit-event-type")
def testAuditEvent4(self):
inst = self.instantiate_from("audit-event-example-media.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent4(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent4(inst2)
def implAuditEvent4(self, inst):
self.assertEqual(inst.action, "R")
self.assertFalse(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "110153")
self.assertEqual(inst.agent[0].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.agent[1].altId, "601847123")
self.assertEqual(inst.agent[1].name, "Grahame Grieve")
self.assertTrue(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[1].type.coding[0].display, "human user")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[2].media.code, "110033")
self.assertEqual(inst.agent[2].media.display, "DVD")
self.assertEqual(inst.agent[2].media.system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.agent[2].name, "Media title: Hello World")
self.assertFalse(inst.agent[2].requestor)
self.assertEqual(inst.agent[2].type.coding[0].code, "110154")
self.assertEqual(inst.agent[2].type.coding[0].display, "Destination Media")
self.assertEqual(inst.agent[2].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.entity[0].role.code, "1")
self.assertEqual(inst.entity[0].role.display, "Patient")
self.assertEqual(inst.entity[0].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[0].type.code, "1")
self.assertEqual(inst.entity[0].type.display, "Person")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.entity[1].role.code, "20")
self.assertEqual(inst.entity[1].role.display, "Job")
self.assertEqual(inst.entity[1].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[1].type.code, "2")
self.assertEqual(inst.entity[1].type.display, "System Object")
self.assertEqual(inst.entity[1].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.entity[2].type.code, "2")
self.assertEqual(inst.entity[2].type.display, "System Object")
self.assertEqual(inst.entity[2].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example-media")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2015-08-27T23:42:24Z").date)
self.assertEqual(inst.recorded.as_json(), "2015-08-27T23:42:24Z")
self.assertEqual(inst.subtype[0].code, "ITI-32")
self.assertEqual(inst.subtype[0].display, "Distribute Document Set on Media")
self.assertEqual(inst.subtype[0].system, "urn:oid:1.3.6.1.4.1.19376.1.2")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110106")
self.assertEqual(inst.type.display, "Export")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent5(self):
inst = self.instantiate_from("audit-event-example-login.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent5(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent5(inst2)
def implAuditEvent5(self, inst):
self.assertEqual(inst.action, "E")
self.assertEqual(inst.agent[0].altId, "601847123")
self.assertEqual(inst.agent[0].name, "Grahame Grieve")
self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
self.assertEqual(inst.agent[0].network.type, "2")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.id, "example-login")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2013-06-20T23:41:23Z").date)
self.assertEqual(inst.recorded.as_json(), "2013-06-20T23:41:23Z")
self.assertEqual(inst.source.site, "Cloud")
self.assertEqual(inst.source.type[0].code, "3")
self.assertEqual(inst.source.type[0].display, "Web Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "110122")
self.assertEqual(inst.subtype[0].display, "Login")
self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110114")
self.assertEqual(inst.type.display, "User Authentication")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent6(self):
inst = self.instantiate_from("audit-event-example-pixQuery.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent6(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent6(inst2)
def implAuditEvent6(self, inst):
self.assertEqual(inst.action, "E")
self.assertEqual(inst.agent[0].altId, "6580")
self.assertEqual(inst.agent[0].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[0].network.type, "1")
self.assertFalse(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "110153")
self.assertEqual(inst.agent[0].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.agent[1].altId, "601847123")
self.assertEqual(inst.agent[1].name, "Grahame Grieve")
self.assertTrue(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[1].type.coding[0].display, "human user")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.entity[0].role.code, "1")
self.assertEqual(inst.entity[0].role.display, "Patient")
self.assertEqual(inst.entity[0].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[0].type.code, "1")
self.assertEqual(inst.entity[0].type.display, "Person")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.entity[1].detail[0].type, "MSH-10")
self.assertEqual(inst.entity[1].detail[0].valueBase64Binary, "MS4yLjg0MC4xMTQzNTAuMS4xMy4wLjEuNy4xLjE=")
self.assertEqual(inst.entity[1].role.code, "24")
self.assertEqual(inst.entity[1].role.display, "Query")
self.assertEqual(inst.entity[1].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[1].type.code, "2")
self.assertEqual(inst.entity[1].type.display, "System Object")
self.assertEqual(inst.entity[1].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example-pixQuery")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2015-08-26T23:42:24Z").date)
self.assertEqual(inst.recorded.as_json(), "2015-08-26T23:42:24Z")
self.assertEqual(inst.subtype[0].code, "ITI-9")
self.assertEqual(inst.subtype[0].display, "PIX Query")
self.assertEqual(inst.subtype[0].system, "urn:oid:1.3.6.1.4.1.19376.1.2")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110112")
self.assertEqual(inst.type.display, "Query")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent7(self):
inst = self.instantiate_from("auditevent-example.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent7(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent7(inst2)
def implAuditEvent7(self, inst):
self.assertEqual(inst.action, "E")
self.assertEqual(inst.agent[0].network.address, "127.0.0.1")
self.assertEqual(inst.agent[0].network.type, "2")
self.assertFalse(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].role[0].text, "Service User (Logon)")
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.entity[0].lifecycle.code, "6")
self.assertEqual(inst.entity[0].lifecycle.display, "Access / Use")
self.assertEqual(inst.entity[0].lifecycle.system, "http://terminology.hl7.org/CodeSystem/dicom-audit-lifecycle")
self.assertEqual(inst.entity[0].name, "Grahame's Laptop")
self.assertEqual(inst.entity[0].role.code, "4")
self.assertEqual(inst.entity[0].role.display, "Domain Resource")
self.assertEqual(inst.entity[0].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[0].type.code, "4")
self.assertEqual(inst.entity[0].type.display, "Other")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.recorded.date, FHIRDate("2012-10-25T22:04:27+11:00").date)
self.assertEqual(inst.recorded.as_json(), "2012-10-25T22:04:27+11:00")
self.assertEqual(inst.source.site, "Development")
self.assertEqual(inst.source.type[0].code, "110122")
self.assertEqual(inst.source.type[0].display, "Login")
self.assertEqual(inst.source.type[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.subtype[0].code, "110120")
self.assertEqual(inst.subtype[0].display, "Application Start")
self.assertEqual(inst.subtype[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Application Start for under service login &quot;Grahame&quot; (id: Grahame's Test HL7Connect)</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110100")
self.assertEqual(inst.type.display, "Application Activity")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent8(self):
inst = self.instantiate_from("auditevent-example-disclosure.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent8(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent8(inst2)
def implAuditEvent8(self, inst):
self.assertEqual(inst.action, "R")
self.assertEqual(inst.agent[0].altId, "notMe")
self.assertEqual(inst.agent[0].name, "That guy everyone wishes would be caught")
self.assertEqual(inst.agent[0].network.address, "custodian.net")
self.assertEqual(inst.agent[0].network.type, "1")
self.assertEqual(inst.agent[0].policy[0], "http://consent.com/yes")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "110153")
self.assertEqual(inst.agent[0].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.agent[1].network.address, "marketing.land")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].code, "HMARKT")
self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].display, "healthcare marketing")
self.assertEqual(inst.agent[1].purposeOfUse[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110152")
self.assertEqual(inst.agent[1].type.coding[0].display, "Destination Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.entity[0].role.code, "1")
self.assertEqual(inst.entity[0].role.display, "Patient")
self.assertEqual(inst.entity[0].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[0].type.code, "1")
self.assertEqual(inst.entity[0].type.display, "Person")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.entity[1].description, "data about Everthing important")
self.assertEqual(inst.entity[1].lifecycle.code, "11")
self.assertEqual(inst.entity[1].lifecycle.display, "Disclosure")
self.assertEqual(inst.entity[1].lifecycle.system, "http://terminology.hl7.org/CodeSystem/dicom-audit-lifecycle")
self.assertEqual(inst.entity[1].name, "Namne of What")
self.assertEqual(inst.entity[1].role.code, "4")
self.assertEqual(inst.entity[1].role.display, "Domain Resource")
self.assertEqual(inst.entity[1].role.system, "http://terminology.hl7.org/CodeSystem/object-role")
self.assertEqual(inst.entity[1].securityLabel[0].code, "V")
self.assertEqual(inst.entity[1].securityLabel[0].display, "very restricted")
self.assertEqual(inst.entity[1].securityLabel[0].system, "http://terminology.hl7.org/CodeSystem/v3-Confidentiality")
self.assertEqual(inst.entity[1].securityLabel[1].code, "STD")
self.assertEqual(inst.entity[1].securityLabel[1].display, "sexually transmitted disease information sensitivity")
self.assertEqual(inst.entity[1].securityLabel[1].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.entity[1].securityLabel[2].code, "DELAU")
self.assertEqual(inst.entity[1].securityLabel[2].display, "delete after use")
self.assertEqual(inst.entity[1].securityLabel[2].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.entity[1].type.code, "2")
self.assertEqual(inst.entity[1].type.display, "System Object")
self.assertEqual(inst.entity[1].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.id, "example-disclosure")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "0")
self.assertEqual(inst.outcomeDesc, "Successful Disclosure")
self.assertEqual(inst.purposeOfEvent[0].coding[0].code, "HMARKT")
self.assertEqual(inst.purposeOfEvent[0].coding[0].display, "healthcare marketing")
self.assertEqual(inst.purposeOfEvent[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.recorded.date, FHIRDate("2013-09-22T00:08:00Z").date)
self.assertEqual(inst.recorded.as_json(), "2013-09-22T00:08:00Z")
self.assertEqual(inst.source.site, "Watcher")
self.assertEqual(inst.source.type[0].code, "4")
self.assertEqual(inst.source.type[0].display, "Application Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "Disclosure")
self.assertEqual(inst.subtype[0].display, "HIPAA disclosure")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Disclosure by some idiot, for marketing reasons, to places unknown, of a Poor Sap, data about Everthing important.</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "110106")
self.assertEqual(inst.type.display, "Export")
self.assertEqual(inst.type.system, "http://dicom.nema.org/resources/ontology/DCM")
def testAuditEvent9(self):
inst = self.instantiate_from("auditevent-example-error.json")
self.assertIsNotNone(inst, "Must have instantiated a AuditEvent instance")
self.implAuditEvent9(inst)
js = inst.as_json()
self.assertEqual("AuditEvent", js["resourceType"])
inst2 = auditevent.AuditEvent(js)
self.implAuditEvent9(inst2)
def implAuditEvent9(self, inst):
self.assertEqual(inst.action, "C")
self.assertEqual(inst.agent[0].altId, "601847123")
self.assertEqual(inst.agent[0].name, "Grahame Grieve")
self.assertTrue(inst.agent[0].requestor)
self.assertEqual(inst.agent[0].type.coding[0].code, "humanuser")
self.assertEqual(inst.agent[0].type.coding[0].display, "human user")
self.assertEqual(inst.agent[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/extra-security-role-type")
self.assertEqual(inst.agent[1].altId, "6580")
self.assertEqual(inst.agent[1].network.address, "Workstation1.ehr.familyclinic.com")
self.assertEqual(inst.agent[1].network.type, "1")
self.assertFalse(inst.agent[1].requestor)
self.assertEqual(inst.agent[1].type.coding[0].code, "110153")
self.assertEqual(inst.agent[1].type.coding[0].display, "Source Role ID")
self.assertEqual(inst.agent[1].type.coding[0].system, "http://dicom.nema.org/resources/ontology/DCM")
self.assertEqual(inst.contained[0].id, "o1")
self.assertEqual(inst.entity[0].detail[0].type, "requested transaction")
self.assertEqual(inst.entity[0].detail[0].valueString, "http POST ..... ")
self.assertEqual(inst.entity[0].type.code, "2")
self.assertEqual(inst.entity[0].type.display, "System Object")
self.assertEqual(inst.entity[0].type.system, "http://terminology.hl7.org/CodeSystem/audit-entity-type")
self.assertEqual(inst.entity[1].description, "transaction failed")
self.assertEqual(inst.entity[1].type.code, "OperationOutcome")
self.assertEqual(inst.entity[1].type.display, "OperationOutcome")
self.assertEqual(inst.entity[1].type.system, "http://hl7.org/fhir/resource-types")
self.assertEqual(inst.id, "example-error")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.outcome, "8")
self.assertEqual(inst.outcomeDesc, "Invalid request to create an Operation resource on the Patient endpoint.")
self.assertEqual(inst.recorded.date, FHIRDate("2017-09-07T23:42:24Z").date)
self.assertEqual(inst.recorded.as_json(), "2017-09-07T23:42:24Z")
self.assertEqual(inst.source.site, "Cloud")
self.assertEqual(inst.source.type[0].code, "3")
self.assertEqual(inst.source.type[0].display, "Web Server")
self.assertEqual(inst.source.type[0].system, "http://terminology.hl7.org/CodeSystem/security-source-type")
self.assertEqual(inst.subtype[0].code, "create")
self.assertEqual(inst.subtype[0].display, "create")
self.assertEqual(inst.subtype[0].system, "http://hl7.org/fhir/restful-interaction")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.code, "rest")
self.assertEqual(inst.type.display, "Restful Operation")
self.assertEqual(inst.type.system, "http://terminology.hl7.org/CodeSystem/audit-event-type")
| [] | [] | [
"FHIR_UNITTEST_DATADIR"
] | [] | ["FHIR_UNITTEST_DATADIR"] | python | 1 | 0 | |
qiniu_test.go | package qiniu
import (
//"fmt"
"io/ioutil"
"os"
//"strconv"
//"testing"
//
//storagedriver "github.com/docker/distribution/registry/storage/driver"
//"github.com/docker/distribution/registry/storage/driver/testsuites"
//"gopkg.in/check.v1"
"testing"
"fmt"
"bufio"
"io"
)
var (
d *Driver
)
func init() {
accessKey := os.Getenv("QINIU_ACCOUNT_NAME")
secretKey := os.Getenv("QINIU_ACCOUNT_KEY")
bucket := os.Getenv("QINIU_BUCKET")
zone := os.Getenv("QINIU_ZONE")
domain := os.Getenv("QINIU_DOMAIN")
isPrivate := os.Getenv("QINIU_ISPRIVATE")
root, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)
}
defer os.Remove(root)
parameters := map[string]interface{}{}
parameters[paramAccountKey] = secretKey
parameters[paramAccountName] = accessKey
parameters[paramBucket] = bucket
parameters[paramZone] = zone
parameters[paramDomain] = domain
parameters[paramIsPrivate] = isPrivate
d, _ = FromParameters(parameters)
}
func TestDriver_Name(t *testing.T) {
if d.Name() != "qiniu" {
t.Error("Invalid name")
}
}
//
//func TestDriver_GetContent(t *testing.T) {
// data,err := d.GetContent(nil, "/docker/test")
// if err != nil {
// t.Error(err)
// }
// fmt.Print(string(data))
//}
//
//func TestDriver_PutContent(t *testing.T) {
// err := d.PutContent(nil, "/docker/test2", []byte("abc"))
// if err != nil {
// t.Error(err)
// }
//}
//
//func TestDriver_List(t *testing.T) {
// items, err := d.List(nil, "/docker");
// if err != nil {
// t.Error(err)
// }
// fmt.Print(items)
//}
//
//func TestDriver_Stat(t *testing.T) {
// info, err := d.Stat(nil, "/docker/test")
// if err != nil {
// t.Error(err)
// }
// fmt.Print(info)
//}
//func TestDriver_URLFor(t *testing.T) {
// url, err := d.URLFor(nil, "/docker/test", nil)
// if err != nil {
// t.Error(err)
// }
// fmt.Print("url="+url)
//}
//
//func TestDriver_Move(t *testing.T) {
// err := d.Move(nil,"/docker/test2", "/docker/test3")
// if err != nil {
// t.Error(err)
// }
//}
//
//func TestDriver_Delete(t *testing.T) {
// err := d.Delete(nil, "/docker/test3")
// if err != nil {
// t.Error(err)
// }
//}
//func TestDriver_Reader(t *testing.T) {
// reader, err := d.Reader(nil,"/docker/test",1);
// if err != nil {
// t.Error(err)
// }
// var p []byte
// reader.Read(p)
// fmt.Print(len(p))
// fmt.Print(string(p))
//}
func TestDriver_Writer(t *testing.T) {
	f, err := os.Open("/Users/william/Downloads/test.pptx")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	writer, err := d.Writer(nil, "/docker/testWriter2.pptx", false)
if err != nil {
t.Error(err)
}
buffer := make([]byte, 1024*1024)
reader := bufio.NewReader(f)
//offset := int64(1024*1024)
num := 0
for {
		n, err := reader.Read(buffer)
		if err != nil && err != io.EOF {
			panic(err)
		}
		if n == 0 {
			break
		}
		num += n
writer.Write(buffer[0:n])
}
fmt.Println(num)
err = writer.Commit()
if err != nil {
t.Error(err)
}
fmt.Println("before close")
err = writer.Close()
if err != nil {
t.Error(err)
}
fmt.Println("finish")
}
//func Test(t *testing.T) { check.TestingT(t) }
//
//var qiniuDriverConstructor func(rootDirectory string) (*Driver, error)
//
//var skipCheck func() string
//
//func init() {
// accessKey := os.Getenv("QINIU_ACCOUNT_NAME")
// secretKey := os.Getenv("QINIU_ACCOUNT_KEY")
// bucket := os.Getenv("QINIU_BUCKET")
// zone := os.Getenv("QINIU_ZONE")
// domain := os.Getenv("QINIU_DOMAIN")
// isPrivate := os.Getenv("QINIU_ISPRIVATE")
// root, err := ioutil.TempDir("", "driver-")
// if err != nil {
// panic(err)
// }
// defer os.Remove(root)
//
// parameters := map[string]interface{}{}
// parameters[paramAccountKey] = secretKey
// parameters[paramAccountName] = accessKey
// parameters[paramBucket] = bucket
// parameters[paramZone] = zone
// parameters[paramDomain] = domain
// parameters[paramIsPrivate] = isPrivate
//
//
// d, _ = FromParameters(parameters)
//
// qiniuDriverConstructor = func(rootDirectory string) (*Driver, error) {
// var err error
// zoneInt := 0
// if zone != "" {
// zoneInt, err = strconv.Atoi(zone)
// if err != nil {
// return nil, err
// }
// }
//
// isPrivateBool := false
// if isPrivate != "" {
// isPrivateBool, err = strconv.ParseBool(isPrivate)
// if err != nil {
// return nil, err
// }
// }
//
// parameters := DriverParameters{
// AccessKey: accessKey,
// SecretKey: secretKey,
// Bucket: bucket,
// Zone: zoneInt,
// IsPrivate: isPrivateBool,
// }
//
// return New(parameters)
// }
//
// // Skip OSS storage driver tests if environment variable parameters are not provided
// skipCheck = func() string {
// if accessKey == "" || secretKey == "" || zone == "" || bucket == "" || isPrivate == "" || bucket == "" {
// return "Must set QINIU_ACCOUNT_NAME, QINIU_ACCOUNT_KEY, QINIU_BUCKET, QINIU_ZONE, and QINIU_ISPRIVATE to run qiniu tests"
// }
// return ""
// }
//
// testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
// return qiniuDriverConstructor(root)
// }, skipCheck)
//
//}
//
//func TestDriver_Name(t *testing.T) {
// fmt.Print(d.Name())
//}
| [
"\"QINIU_ACCOUNT_NAME\"",
"\"QINIU_ACCOUNT_KEY\"",
"\"QINIU_BUCKET\"",
"\"QINIU_ZONE\"",
"\"QINIU_DOMAIN\"",
"\"QINIU_ISPRIVATE\"",
"\"QINIU_ACCOUNT_NAME\"",
"\"QINIU_ACCOUNT_KEY\"",
"\"QINIU_BUCKET\"",
"\"QINIU_ZONE\"",
"\"QINIU_DOMAIN\"",
"\"QINIU_ISPRIVATE\""
] | [] | [
"QINIU_ACCOUNT_NAME",
"QINIU_ISPRIVATE",
"QINIU_DOMAIN",
"QINIU_ACCOUNT_KEY",
"QINIU_ZONE",
"QINIU_BUCKET"
] | [] | ["QINIU_ACCOUNT_NAME", "QINIU_ISPRIVATE", "QINIU_DOMAIN", "QINIU_ACCOUNT_KEY", "QINIU_ZONE", "QINIU_BUCKET"] | go | 6 | 0 | |
tests/rkt_ace_validator_test.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"testing"
"github.com/coreos/rkt/tests/testutils"
)
var expectedResults = []string{
"prestart OK",
"main OK",
"sidekick OK",
"poststop OK",
}
func TestAceValidator(t *testing.T) {
ctx := testutils.NewRktRunCtx()
defer ctx.Cleanup()
if err := ctx.LaunchMDS(); err != nil {
t.Fatalf("Cannot launch metadata service: %v", err)
}
aceMain := os.Getenv("RKT_ACE_MAIN_IMAGE")
if aceMain == "" {
panic("empty RKT_ACE_MAIN_IMAGE env var")
}
aceSidekick := os.Getenv("RKT_ACE_SIDEKICK_IMAGE")
if aceSidekick == "" {
panic("empty RKT_ACE_SIDEKICK_IMAGE env var")
}
rktArgs := fmt.Sprintf("--debug --insecure-skip-verify run --mds-register --volume database,kind=empty %s %s",
aceMain, aceSidekick)
rktCmd := fmt.Sprintf("%s %s", ctx.Cmd(), rktArgs)
child := spawnOrFail(t, rktCmd)
defer waitOrFail(t, child, true)
for _, e := range expectedResults {
if err := expectWithOutput(child, e); err != nil {
t.Fatalf("Expected %q but not found: %v", e, err)
}
}
}
| [
"\"RKT_ACE_MAIN_IMAGE\"",
"\"RKT_ACE_SIDEKICK_IMAGE\""
] | [] | [
"RKT_ACE_SIDEKICK_IMAGE",
"RKT_ACE_MAIN_IMAGE"
] | [] | ["RKT_ACE_SIDEKICK_IMAGE", "RKT_ACE_MAIN_IMAGE"] | go | 2 | 0 | |
travis_aws_key_rotate.py | # -*- coding: utf-8 -*-
import logging
import os
import trawsate
logger = logging.getLogger(__name__)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
_TRAVIS_ACCESS_TOKEN = os.environ['TRAVIS_ACCESS_TOKEN']
# noinspection PyUnusedLocal
def handler(event, context) -> None:
"""
AWS Lambda entry point.
:param event: The event that triggered this execution.
:param context: Current runtime information: http://docs.aws.amazon.com
/lambda/latest/dg/python-context-object.html.
"""
logger.info(f'Event: {event}')
rotator = trawsate.Rotator(_TRAVIS_ACCESS_TOKEN)
for result in rotator.keys(event['config'], 30):
logger.info(result)
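

# Illustrative invocation sketch (assumption, not part of the original module):
# in AWS Lambda the runtime calls handler(event, context); the event is expected
# to carry a 'config' key whose exact schema is defined by the trawsate package.
#
#   handler({'config': {...}}, None)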
| [] | [] | [
"TRAVIS_ACCESS_TOKEN"
] | [] | ["TRAVIS_ACCESS_TOKEN"] | python | 1 | 0 | |
abm/model-structures.go | package abm
import (
"crypto/rand"
"fmt"
"log"
"os"
"path"
"sync"
"time"
"github.com/benjamin-rood/abm-cp/colour"
"github.com/benjamin-rood/abm-cp/render"
"github.com/benjamin-rood/gobr"
)
// Model acts as the working instance of the modelling session / 'game'
type Model struct {
timestamp string // instance inception time
running bool
Timeframe // embedded Model clock
Environment // embedded environment attributes
ConditionParams // embedded local model conditions and constraints
AgentPopulations // embedded slices of each agent type
Om chan gobr.OutMsg // Outgoing comm channel – dispatches batch render instructions
Im chan gobr.InMsg // Incoming comm channel – receives user control messages
e chan error // error message channel - general
Quit chan struct{} // WebSckt monitor signal - external stop signal on ch close
halt chan struct{} // exec engine halt signal on ch close
render chan render.AgentRender // VIS message channel
turnSync *gobr.SignalHub // synchronisation
Stats // embedded global agent population statistics
DatBuf // embedded buffer of last turn agent pop record for LOG
}
// AgentPopulations collects slices of agent types of the `abm` package active in a model instance.
type AgentPopulations struct {
popCpPrey []ColourPolymorphicPrey // current prey agent population
popVisualPredator []VisualPredator // current predator agent population
}
/*
Environment specifies the boundary / dimensions of the working model. They
extend in both positive and negative directions, oriented at the center. Setting
any field (eg. zBounds) to zero will reduce the dimensionality of the model. For
most cases, a 2D environment will be sufficient.
In the future it may include some environmental factors etc.
*/
type Environment struct {
Bounds []float64 `json:"abm-environment-bounds"` // d value for each axis
Dimensionality int `json:"abm-environment-dimensionality"`
BG colour.RGB `json:"abm-environment-background"`
}
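
// Illustrative sketch only — the field values below are assumptions for
// demonstration, not part of the original model: a 2D environment extending
// ±100.0 on each axis, with the background colour left at its zero value.
var exampleEnvironment2D = Environment{
	Bounds:         []float64{100.0, 100.0},
	Dimensionality: 2,
}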
// ConditionParams groups the CONSTANT LOCAL model conditions and constraints into a single set
type ConditionParams struct {
Environment `json:"abm-environment"` // embedded model environment
CpPreyPopulationStart int `json:"abm-cp-prey-pop-start"` // starting Prey agent population size
CpPreyPopulationCap int `json:"abm-cp-prey-pop-cap"` //
CpPreyAgeing bool `json:"abm-cp-prey-ageing"` //
CpPreyLifespan int `json:"abm-cp-prey-lifespan"` // Prey agent lifespan
CpPreyS float64 `json:"abm-cp-prey-speed"` // Prey agent speed
CpPreyA float64 `json:"abm-cp-prey-acceleration"` // Prey agent acceleration
CpPreyTurn float64 `json:"abm-cp-prey-turn"` // Prey agent turn rate / range (in radians)
CpPreySr float64 `json:"abm-cp-prey-sr"` // Prey agent search range for mating
CpPreyGestation int `json:"abm-cp-prey-gestation"` // Prey gestation period
CpPreySexualCost int `json:"abm-cp-prey-sexual-cost"` // Prey sexual rest cost
CpPreyReproductionChance float64 `json:"abm-cp-prey-reproduction-chance"` // chance of CP Prey copulation success.
CpPreySpawnSize int `json:"abm-cp-prey-spawn-size"` // possible number of progeny = [1, max]
CpPreyMutationFactor float64 `json:"abm-cp-prey-mf"` // mutation factor
VpPopulationStart int `json:"abm-vp-pop-start"` // starting Predator agent population size
VpPopulationCap int `json:"abm-vp-pop-cap"` //
VpAgeing bool `json:"abm-vp-ageing"` //
VpLifespan int `json:"abm-vp-lifespan"` // Visual Predator lifespan
VpStarvationPoint int `json:"abm-vp-starvation-point"` //
VpPanicPoint int `json:"abm-vp-panic-point"` //
VpGestation int `json:"abm-vp-gestation"` // Visual Predator gestation period
VpSexualRequirement int `json:"abm-vp-sex-req"` //
VpMovS float64 `json:"abm-vp-speed"` // Visual Predator speed
VpMovA float64 `json:"abm-vp-acceleration"` // Visual Predator acceleration
VpTurn float64 `json:"abm-vp-turn"` // Visual Predator turn rate / range (in radians)
VpVsr float64 `json:"abm-vp-vsr"` // Visual Predator visual search range
VpVb𝛄 float64 `json:"abm-vp-visual-search-tolerance"` //
VpV𝛄Bump float64 `json:"abm-vp-visual-search-tolerance-bump"` //
VpVbε float64 `json:"abm-vp-baseline-col-sig-strength"` // baseline colour signal strength factor
VpVmε float64 `json:"abm-vp-max-col-sig-strength"` // max limit colour signal strength factor
VpReproductionChance float64 `json:"abm-vp-reproduction-chance"` // chance of VP copulation success.
VpSpawnSize int `json:"abm-vp-spawn-size"` //
VpSearchChance float64 `json:"abm-vp-vsr-chance"` //
VpAttackChance float64 `json:"abm-vp-attack-chance"` //
VpBaseAttackGain float64 `json:"abm-vp-baseline-attack-gain"` //
VpCaf float64 `json:"abm-vp-col-adaptation-factor"` //
VpStarvation bool `json:"abm-vp-starvation"` //
RandomAges bool `json:"abm-random-ages"` // flag determining if agent ages are randomised
RNGRandomSeed bool `json:"abm-rng-random-seed"` // flag for using server-set random seed val.
RNGSeedVal int64 `json:"abm-rng-seedval"` // RNG seed value
Fuzzy float64 `json:"abm-rng-fuzziness"` // random 'fuzziness' offset
Logging bool `json:"abm-logging-flag"` // log abm on/off
LogFreq int `json:"abm-log-frequency"` // # of turns between writing log files. Default = 0
UseCustomLogPath bool `json:"abm-use-custom-log-filepath"` //
CustomLogPath string `json:"abm-custom-log-filepath"` //
LogPath string `json:"abm-log-filepath"` // Default logging filepath unless UseCustomLogPath is ON
Visualise bool `json:"abm-visualise-flag"` // Visualisation on/off
VisFreq int `json:"abm-visualise-freq"` // # of turns between sending draw instructions to web client. Default = 0
LimitDuration bool `json:"abm-limit-duration"` //
FixedDuration int `json:"abm-fixed-duration"` // fixed abm running length.
SessionIdentifier string `json:"abm-session-identifier"` // user-friendly string (from client) to identify session
}
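
// Illustrative fragment only (values are assumptions, not defaults from this
// package): ConditionParams is (de)serialised via the JSON tags above, so a
// partial config document could look like:
//
//	{
//	  "abm-environment": {
//	    "abm-environment-bounds": [100.0, 100.0],
//	    "abm-environment-dimensionality": 2
//	  },
//	  "abm-cp-prey-pop-start": 50,
//	  "abm-vp-pop-start": 10
//	}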
/*
Timeframe holds the model's representation of the time metrics.
Turn – The cycle length for all agents ∈ 𝐄 to perform 1 (and only 1) Action.
Phase – Division of a Turn, between agent sets, environmental effects/factors,
and updates to populations and model conditionss (via external).
One Phase is complete when all members of a set have performed an Action
or all requirements for the model's continuation have been fulfilled.
Action – An individual 'step' in the model. All Actions have a cost:
the period (number of turns) before that specific Action can be
performed again. For most actions this is zero.
Some Actions could also *stop* any other behaviour by that agent
for a period.
*/
type Timeframe struct {
Turn int
Phase int
Action int
}
// Reset 's the timeframe to 00:00:00
func (t *Timeframe) Reset() {
t.Turn, t.Phase, t.Action = 0, 0, 0
}
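
// Illustrative sketch only (not used by the model): a clock part-way through a
// turn being wound back to 00:00:00, following the Timeframe semantics above.
func exampleTimeframeReset() Timeframe {
	t := Timeframe{Turn: 3, Phase: 1, Action: 2}
	t.Reset()
	return t // Turn, Phase and Action are all zero again
}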
// Stats holds global statistics of the model instance.
type Stats struct {
numCpPreyCreated int
numCpPreyEaten int
numCpPreyDeath int
numVpCreated int
numVpDeath int
}
// DatBuf is a wrapper for the buffered agent data saved for logging.
type DatBuf struct {
recordCPP map[string]ColourPolymorphicPrey
rcpPreyRW sync.RWMutex
recordVP map[string]VisualPredator
rvpRW sync.RWMutex
}
// AgentDescription used to aid for logging / debugging - used at time of agent creation
type AgentDescription struct {
AgentType string `json:"agent-type"`
AgentNum int `json:"agent-num"`
ParentUUID string `json:"parent"`
CreatedMT int `json:"creation-turn"`
CreatedAT string `json:"creation-date"`
}
// NewModel is a constructor for initialising a Model instance
func NewModel() *Model {
m := Model{}
m.timestamp = fmt.Sprintf("%s", time.Now())
m.running = false
m.Timeframe = Timeframe{}
m.Environment = DefaultEnvironment
m.ConditionParams = PresetParams
m.LogPath = path.Join(os.Getenv("HOME")+os.Getenv("HOMEPATH"), abmlogPath, m.SessionIdentifier, m.timestamp)
m.recordCPP = make(map[string]ColourPolymorphicPrey)
m.recordVP = make(map[string]VisualPredator)
m.Om = make(chan gobr.OutMsg)
m.Im = make(chan gobr.InMsg)
m.e = make(chan error)
m.Quit = make(chan struct{})
m.halt = make(chan struct{})
m.render = make(chan render.AgentRender)
m.turnSync = gobr.NewSignalHub()
return &m
}
// PopLog prints the current time and populations
func (m *Model) PopLog() {
log.Printf("%04dT : %04dP : %04dA\n", m.Turn, m.Phase, m.Action)
log.Printf("cpPrey population size = %v\n", len(m.popCpPrey))
log.Printf("vp population size = %v\n", len(m.popVisualPredator))
}
func uuid() string {
b := make([]byte, 16)
rand.Read(b)
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
| [
"\"HOME\"",
"\"HOMEPATH\""
] | [] | [
"HOME",
"HOMEPATH"
] | [] | ["HOME", "HOMEPATH"] | go | 2 | 0 | |
build/params_mainnet.go | // +build !debug
// +build !2k
// +build !testground
// +build !calibnet
// +build !nerpanet
// +build !butterflynet
package build
import (
"math"
"os"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"/* Fixing app_name */
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
	0:                  DrandIncentinet,
	UpgradeSmokeHeight: DrandMainnet,
}
const BootstrappersFile = "mainnet.pi"
const GenesisFile = "mainnet.car"
const UpgradeBreezeHeight = 41280
const BreezeGasTampingDuration = 120
const UpgradeSmokeHeight = 51000
const UpgradeIgnitionHeight = 94000
const UpgradeRefuelHeight = 130800
const UpgradeActorsV2Height = 138720
const UpgradeTapeHeight = 140760
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
// Miners, clients, developers, custodians all need time to prepare.
// We still have upgrades and state changes to do, but can happen after signaling timing here.
const UpgradeLiftoffHeight = 148888
const UpgradeKumquatHeight = 170000
const UpgradeCalicoHeight = 265200
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight = 336458
// 2020-12-22T02:00:00Z
const UpgradeClausHeight = 343200
// 2021-03-04T00:00:30Z
var UpgradeActorsV3Height = abi.ChainEpoch(550321)
// 2021-04-12T22:00:00Z
const UpgradeNorwegianHeight = 665280
// 2021-04-29T06:00:00Z
var UpgradeActorsV4Height = abi.ChainEpoch(712320)
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
if os.Getenv("LOTUS_DISABLE_V3_ACTOR_MIGRATION") == "1" {
UpgradeActorsV3Height = math.MaxInt64
}
if os.Getenv("LOTUS_DISABLE_V4_ACTOR_MIGRATION") == "1" {
UpgradeActorsV4Height = math.MaxInt64
}
	Devnet = false
	BuildType = BuildMainnet
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
// we skip checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
| [
"\"LOTUS_USE_TEST_ADDRESSES\"",
"\"LOTUS_DISABLE_V3_ACTOR_MIGRATION\"",
"\"LOTUS_DISABLE_V4_ACTOR_MIGRATION\""
] | [] | [
"LOTUS_DISABLE_V3_ACTOR_MIGRATION",
"LOTUS_DISABLE_V4_ACTOR_MIGRATION",
"LOTUS_USE_TEST_ADDRESSES"
] | [] | ["LOTUS_DISABLE_V3_ACTOR_MIGRATION", "LOTUS_DISABLE_V4_ACTOR_MIGRATION", "LOTUS_USE_TEST_ADDRESSES"] | go | 3 | 0 | |
resource-definitions/crd_injector.go | package main
import (
"flag"
"log"
"os"
"path/filepath"
"time"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
GroupName = "secretless" + os.Getenv("SECRETLESS_CRD_SUFFIX") + ".io"
CRDName = "configurations." + GroupName
)
func getHomeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
}
return os.Getenv("USERPROFILE")
}
func createCRD(apiExtClient *apiextensionsclientset.Clientset) {
secretlessCRD := &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: meta_v1.ObjectMeta{
Name: CRDName,
},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: GroupName,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Kind: "Configuration",
Plural: "configurations",
ShortNames: []string{
"sbconfig",
},
},
Version: "v1",
Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{
apiextensionsv1beta1.CustomResourceDefinitionVersion{
Name: "v1",
Served: true,
Storage: true,
},
},
Scope: apiextensionsv1beta1.NamespaceScoped,
},
}
log.Println("Creating CRD...")
res, err := apiExtClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(secretlessCRD)
if err != nil && !apierrors.IsAlreadyExists(err) {
log.Fatalf("ERROR: Could not create Secretless CRD: %v - %v", err, res)
}
if apierrors.IsAlreadyExists(err) {
log.Println("ERROR: CRD was already present!")
} else {
log.Println("CRD was uccessfully added!")
}
}
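
// Illustrative sketch (an assumption for demonstration, not emitted by this
// injector): with an empty SECRETLESS_CRD_SUFFIX, the CRD registered above
// allows namespaced custom resources such as:
//
//	apiVersion: secretless.io/v1
//	kind: Configuration
//	metadata:
//	  name: example-config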
// TODO: Use this to wait for the resources to be available
func waitForCRDAvailability(client *rest.RESTClient) error {
checkCRDAvailableFunc := func() (bool, error) {
_, err := client.Get().Resource(CRDName).DoRaw()
if err == nil {
return true, nil
}
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
log.Println("Waiting for CRD to be available...")
return wait.Poll(200*time.Millisecond, 60*time.Second, checkCRDAvailableFunc)
}
func main() {
log.Println("Secretless CRD injector starting up...")
var kubeConfig *string
if home := getHomeDir(); home != "" {
log.Println("Using home dir config...")
kubeConfig = flag.String("kubeconfig",
filepath.Join(home, ".kube", "config"),
"(optional) absolute path to the kubeconfig file")
} else {
log.Println("Using passed in file config...")
kubeConfig = flag.String("kubeconfig",
"",
"absolute path to the kubeconfig file")
}
flag.Parse()
// Try to use file-based config first
config, err := clientcmd.BuildConfigFromFlags("", *kubeConfig)
if err != nil {
log.Println(err)
}
// Otherwise try using in-cluster service account
if config == nil {
log.Println("Fetching cluster config...")
config, err = rest.InClusterConfig()
if err != nil {
log.Fatalln(err)
}
}
log.Println("Creating K8s client...")
apiExtClient, err := apiextensionsclientset.NewForConfig(config)
if err != nil {
log.Fatalln(err)
}
createCRD(apiExtClient)
// waitForCRDAvailability(apiExtClient)
log.Println("Done!")
}
| [
"\"SECRETLESS_CRD_SUFFIX\"",
"\"HOME\"",
"\"USERPROFILE\""
] | [] | [
"HOME",
"USERPROFILE",
"SECRETLESS_CRD_SUFFIX"
] | [] | ["HOME", "USERPROFILE", "SECRETLESS_CRD_SUFFIX"] | go | 3 | 0 | |
registry/memory/memory_test.go | package memory
import (
"fmt"
"os"
"testing"
"time"
"github.com/haleluo/micro/v2/registry"
)
var (
testData = map[string][]*registry.Service{
"foo": {
{
Name: "foo",
Version: "1.0.0",
Nodes: []*registry.Node{
{
Id: "foo-1.0.0-123",
Address: "localhost:9999",
},
{
Id: "foo-1.0.0-321",
Address: "localhost:9999",
},
},
},
{
Name: "foo",
Version: "1.0.1",
Nodes: []*registry.Node{
{
Id: "foo-1.0.1-321",
Address: "localhost:6666",
},
},
},
{
Name: "foo",
Version: "1.0.3",
Nodes: []*registry.Node{
{
Id: "foo-1.0.3-345",
Address: "localhost:8888",
},
},
},
},
"bar": {
{
Name: "bar",
Version: "default",
Nodes: []*registry.Node{
{
Id: "bar-1.0.0-123",
Address: "localhost:9999",
},
{
Id: "bar-1.0.0-321",
Address: "localhost:9999",
},
},
},
{
Name: "bar",
Version: "latest",
Nodes: []*registry.Node{
{
Id: "bar-1.0.1-321",
Address: "localhost:6666",
},
},
},
},
}
)
func TestMemoryRegistry(t *testing.T) {
m := NewRegistry()
fn := func(k string, v []*registry.Service) {
services, err := m.GetService(k)
if err != nil {
t.Errorf("Unexpected error getting service %s: %v", k, err)
}
if len(services) != len(v) {
t.Errorf("Expected %d services for %s, got %d", len(v), k, len(services))
}
for _, service := range v {
var seen bool
for _, s := range services {
if s.Version == service.Version {
seen = true
break
}
}
if !seen {
t.Errorf("expected to find version %s", service.Version)
}
}
}
// register data
for _, v := range testData {
serviceCount := 0
for _, service := range v {
if err := m.Register(service); err != nil {
t.Errorf("Unexpected register error: %v", err)
}
serviceCount++
// after the service has been registered we should be able to query it
services, err := m.GetService(service.Name)
if err != nil {
t.Errorf("Unexpected error getting service %s: %v", service.Name, err)
}
if len(services) != serviceCount {
t.Errorf("Expected %d services for %s, got %d", serviceCount, service.Name, len(services))
}
}
}
// using test data
for k, v := range testData {
fn(k, v)
}
services, err := m.ListServices()
if err != nil {
t.Errorf("Unexpected error when listing services: %v", err)
}
totalServiceCount := 0
for _, testSvc := range testData {
for range testSvc {
totalServiceCount++
}
}
if len(services) != totalServiceCount {
t.Errorf("Expected total service count: %d, got: %d", totalServiceCount, len(services))
}
// deregister
for _, v := range testData {
for _, service := range v {
if err := m.Deregister(service); err != nil {
t.Errorf("Unexpected deregister error: %v", err)
}
}
}
// after all the service nodes have been deregistered we should not get any results
for _, v := range testData {
for _, service := range v {
services, err := m.GetService(service.Name)
if err != registry.ErrNotFound {
t.Errorf("Expected error: %v, got: %v", registry.ErrNotFound, err)
}
if len(services) != 0 {
t.Errorf("Expected %d services for %s, got %d", 0, service.Name, len(services))
}
}
}
}
func TestMemoryRegistryTTL(t *testing.T) {
m := NewRegistry()
for _, v := range testData {
for _, service := range v {
if err := m.Register(service, registry.RegisterTTL(time.Millisecond)); err != nil {
t.Fatal(err)
}
}
}
time.Sleep(ttlPruneTime * 2)
for name := range testData {
svcs, err := m.GetService(name)
if err != nil {
t.Fatal(err)
}
for _, svc := range svcs {
if len(svc.Nodes) > 0 {
t.Fatalf("Service %q still has nodes registered", name)
}
}
}
}
func TestMemoryRegistryTTLConcurrent(t *testing.T) {
concurrency := 1000
waitTime := ttlPruneTime * 2
m := NewRegistry()
for _, v := range testData {
for _, service := range v {
if err := m.Register(service, registry.RegisterTTL(waitTime/2)); err != nil {
t.Fatal(err)
}
}
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("test will wait %v, then check TTL timeouts", waitTime)
}
errChan := make(chan error, concurrency)
syncChan := make(chan struct{})
for i := 0; i < concurrency; i++ {
go func() {
<-syncChan
for name := range testData {
svcs, err := m.GetService(name)
if err != nil {
errChan <- err
return
}
for _, svc := range svcs {
if len(svc.Nodes) > 0 {
errChan <- fmt.Errorf("Service %q still has nodes registered", name)
return
}
}
}
errChan <- nil
}()
}
time.Sleep(waitTime)
close(syncChan)
for i := 0; i < concurrency; i++ {
if err := <-errChan; err != nil {
t.Fatal(err)
}
}
}
| [
"\"IN_TRAVIS_CI\""
] | [] | [
"IN_TRAVIS_CI"
] | [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
internal/util/util.go | package util
import (
"fmt"
"io"
"os"
"strings"
"testing"
"github.com/cavaliercoder/grab"
"github.com/jedib0t/go-pretty/v6/table"
)
// DoesPathExist checks if a given path exists in the filesystem.
func DoesPathExist(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
// WantTo waits for a valid user input to confirm if he wants to do whatever was asked for.
func WantTo(question string) bool {
var input string
for {
fmt.Print(question + " [y/N] ")
n, err := fmt.Scanln(&input)
if err != nil {
if err.Error() != "unexpected newline" {
fmt.Printf("Unexpected error: %v", err)
}
}
if n == 1 {
input = strings.ToLower(input)
if input == "n" {
return false
} else if input == "y" {
return true
}
} else if n == 0 {
return false
}
}
}
// SkipNetworkBasedTests skips network/internet dependent tests when the env variable PROJI_SKIP_NETWORK_TESTS is set to 1.
func SkipNetworkBasedTests(t *testing.T) {
env := os.Getenv("PROJI_SKIP_NETWORK_TESTS")
if env == "1" {
t.Skip("Skipping network based tests")
}
}
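
// Illustrative usage sketch (assumed, not part of the original package): a
// network-dependent test opting out when PROJI_SKIP_NETWORK_TESTS=1 is set.
//
//	func TestDownloadExample(t *testing.T) {
//		SkipNetworkBasedTests(t)
//		// ... assertions that need internet access ...
//	}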
// CreateFolderIfNotExists creates a folder at the given path if it doesn't already exist.
func CreateFolderIfNotExists(path string) error {
_, err := os.Stat(path)
if !os.IsNotExist(err) {
return err
}
return os.MkdirAll(path, os.ModePerm)
}
// DownloadFile downloads a file from an url to the local fs.
func DownloadFile(dst, src string) error {
_, err := grab.Get(dst, src)
return err
}
// DownloadFileIfNotExists runs downloadFile() if the destination file doesn't already exist.
func DownloadFileIfNotExists(dst, src string) error {
_, err := os.Stat(dst)
if os.IsNotExist(err) {
err = DownloadFile(dst, src)
}
return err
}
// NewInfoTable returns a new table.Writer interface bound to the given io.Writer. It sets some sane defaults
// for table styles and behaviour that are used in the cmd package.
func NewInfoTable(out io.Writer) table.Writer {
infoTable := table.NewWriter()
infoTable.SetOutputMirror(out)
infoTable.SuppressEmptyColumns()
infoTable.SetAutoIndex(true)
infoTable.SetStyle(table.StyleRounded)
return infoTable
}
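
// Illustrative usage sketch (assumed, not part of the original package):
//
//	t := NewInfoTable(os.Stdout)
//	t.AppendHeader(table.Row{"Label", "Class"})
//	t.AppendRow(table.Row{"my-label", "my-class"})
//	t.Render()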
| [
"\"PROJI_SKIP_NETWORK_TESTS\""
] | [] | [
"PROJI_SKIP_NETWORK_TESTS"
] | [] | ["PROJI_SKIP_NETWORK_TESTS"] | go | 1 | 0 | |
config/settings/common.py | # -*- coding: utf-8 -*-
"""
Django settings for Clock project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
from django.utils.translation import ugettext_lazy as _
ROOT_DIR = environ.Path(__file__) - 3 # (/settings/config/common.py - 3 = /)
APPS_DIR = ROOT_DIR.path('clock')
env = environ.Env()
#env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'django_bootstrap_breadcrumbs',
'bootstrap3',
'bootstrap3_datetime',
'captcha',
'taggit',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'clock.users', # custom users app
# Your stuff: custom apps go here
'clock.pages',
'clock.shifts',
'clock.contracts',
'clock.profiles',
'clock.contact',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# We're overriding the normal LocaleMiddleware with a small extension for our needs!
# 'django.middleware.locale.LocaleMiddleware',
'clock.profiles.middleware.LocaleMiddlewareExtended',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'clock.pages.middleware.LastVisitedMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'clock.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Michael Gecht""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///clock"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGES = [
('de', _('German')),
('en', _('English')),
]
LANGUAGE_CODE = 'de'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'django.template.context_processors.request',
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Your common stuff: Below this line define 3rd party library settings
LOCALE_PATHS = (
str(ROOT_DIR('locale')),
)
ACCOUNT_FORMS = {
'signup': 'clock.accounts.forms.ClockSignUpForm',
}
# Contact form settings
CONTACT_FORM_SUBJECT = _('A new message has arrived!')
CONTACT_FORM_RECIPIENT = ['[email protected]']
# reCAPTCHA settings
RECAPTCHA_PUBLIC_KEY = '6LdceCITAAAAALjjBfVAxF4gCw-11zB3cclDfAsf'
RECAPTCHA_PRIVATE_KEY = env("RECAPTCHA_PRIVATE_KEY", default=None)
NOCAPTCHA = True
RECAPTCHA_USE_SSL = True
| [] | [] | [] | [] | [] | python | 0 | 0 | |
certbot/certbot/_internal/constants.py | """Certbot constants."""
import logging
import pkg_resources
from acme import challenges
from certbot.compat import misc
from certbot.compat import os
SETUPTOOLS_PLUGINS_ENTRY_POINT = "certbot.plugins"
"""Setuptools entry point group name for plugins."""
OLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins"
"""Plugins Setuptools entry point before rename."""
CLI_DEFAULTS = dict(
config_files=[
os.path.join(misc.get_default_folder('config'), 'cli.ini'),
# https://freedesktop.org/wiki/Software/xdg-user-dirs/
os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
"letsencrypt", "cli.ini"),
],
# Main parser
verbose_count=0,
verbose_level=None,
text_mode=False,
max_log_backups=1000,
preconfigured_renewal=False,
noninteractive_mode=False,
force_interactive=False,
domains=[],
certname=None,
dry_run=False,
register_unsafely_without_email=False,
email=None,
eff_email=None,
reinstall=False,
expand=False,
renew_by_default=False,
renew_with_new_domains=False,
autorenew=True,
allow_subset_of_names=False,
tos=False,
account=None,
duplicate=False,
os_packages_only=False,
no_self_upgrade=False,
no_permissions_check=False,
no_bootstrap=False,
quiet=False,
staging=False,
debug=False,
debug_challenges=False,
no_verify_ssl=False,
http01_port=challenges.HTTP01Response.PORT,
http01_address="",
https_port=443,
break_my_certs=False,
rsa_key_size=2048,
elliptic_curve="secp256r1",
key_type="rsa",
must_staple=False,
redirect=None,
auto_hsts=False,
hsts=None,
uir=None,
staple=None,
strict_permissions=False,
preferred_chain=None,
pref_challs=[],
validate_hooks=True,
directory_hooks=True,
reuse_key=False,
disable_renew_updates=False,
random_sleep_on_renew=True,
eab_hmac_key=None,
eab_kid=None,
# Subparsers
num=None,
user_agent=None,
user_agent_comment=None,
csr=None,
reason=0,
delete_after_revoke=None,
rollback_checkpoints=1,
init=False,
prepare=False,
ifaces=None,
# Path parsers
auth_cert_path="./cert.pem",
auth_chain_path="./chain.pem",
key_path=None,
config_dir=misc.get_default_folder('config'),
work_dir=misc.get_default_folder('work'),
logs_dir=misc.get_default_folder('logs'),
server="https://acme-v02.api.letsencrypt.org/directory",
# Plugins parsers
configurator=None,
authenticator=None,
installer=None,
apache=False,
nginx=False,
standalone=False,
manual=False,
webroot=False,
dns_cloudflare=False,
dns_cloudxns=False,
dns_digitalocean=False,
dns_dnsimple=False,
dns_dnsmadeeasy=False,
dns_gehirn=False,
dns_google=False,
dns_linode=False,
dns_luadns=False,
dns_nsone=False,
dns_ovh=False,
dns_rfc2136=False,
dns_route53=False,
dns_sakuracloud=False
)
STAGING_URI = "https://acme-staging-v02.api.letsencrypt.org/directory"
V1_URI = "https://acme-v01.api.letsencrypt.org/directory"
# The set of reasons for revoking a certificate is defined in RFC 5280 in
# section 5.3.1. The reasons that users are allowed to submit are restricted to
# those accepted by the ACME server implementation. They are listed in
# `letsencrypt.boulder.revocation.reasons.go`.
REVOCATION_REASONS = {
"unspecified": 0,
"keycompromise": 1,
"affiliationchanged": 3,
"superseded": 4,
"cessationofoperation": 5}
"""Defaults for CLI flags and `certbot.configuration.NamespaceConfig` attributes."""
QUIET_LOGGING_LEVEL = logging.ERROR
"""Logging level to use in quiet mode."""
DEFAULT_LOGGING_LEVEL = logging.WARNING
"""Default logging level to use when not in quiet mode."""
RENEWER_DEFAULTS = dict(
renewer_enabled="yes",
renew_before_expiry="30 days",
# This value should ensure that there is never a deployment delay by
# default.
deploy_before_expiry="99 years",
)
"""Defaults for renewer script."""
ARCHIVE_DIR = "archive"
"""Archive directory, relative to `certbot.configuration.NamespaceConfig.config_dir`."""
CONFIG_DIRS_MODE = 0o755
"""Directory mode for ``certbot.configuration.NamespaceConfig.config_dir`` et al."""
ACCOUNTS_DIR = "accounts"
"""Directory where all accounts are saved."""
LE_REUSE_SERVERS = {
os.path.normpath('acme-v02.api.letsencrypt.org/directory'):
os.path.normpath('acme-v01.api.letsencrypt.org/directory'),
os.path.normpath('acme-staging-v02.api.letsencrypt.org/directory'):
os.path.normpath('acme-staging.api.letsencrypt.org/directory')
}
"""Servers that can reuse accounts from other servers."""
BACKUP_DIR = "backups"
"""Directory (relative to `certbot.configuration.NamespaceConfig.work_dir`)
where backups are kept."""
CSR_DIR = "csr"
"""See `certbot.configuration.NamespaceConfig.csr_dir`."""
IN_PROGRESS_DIR = "IN_PROGRESS"
"""Directory used before a permanent checkpoint is finalized (relative to
`certbot.configuration.NamespaceConfig.work_dir`)."""
KEY_DIR = "keys"
"""Directory (relative to `certbot.configuration.NamespaceConfig.config_dir`)
where keys are saved."""
LIVE_DIR = "live"
"""Live directory, relative to `certbot.configuration.NamespaceConfig.config_dir`."""
TEMP_CHECKPOINT_DIR = "temp_checkpoint"
"""Temporary checkpoint directory, relative
to `certbot.configuration.NamespaceConfig.work_dir`."""
RENEWAL_CONFIGS_DIR = "renewal"
"""Renewal configs directory, relative
to `certbot.configuration.NamespaceConfig.config_dir`."""
RENEWAL_HOOKS_DIR = "renewal-hooks"
"""Basename of directory containing hooks to run with the renew command."""
RENEWAL_PRE_HOOKS_DIR = "pre"
"""Basename of directory containing pre-hooks to run with the renew command."""
RENEWAL_DEPLOY_HOOKS_DIR = "deploy"
"""Basename of directory containing deploy-hooks to run with the renew command."""
RENEWAL_POST_HOOKS_DIR = "post"
"""Basename of directory containing post-hooks to run with the renew command."""
FORCE_INTERACTIVE_FLAG = "--force-interactive"
"""Flag to disable TTY checking in certbot.display.util."""
EFF_SUBSCRIBE_URI = "https://supporters.eff.org/subscribe/certbot"
"""EFF URI used to submit the e-mail address of users who opt-in."""
SSL_DHPARAMS_DEST = "ssl-dhparams.pem"
"""Name of the ssl_dhparams file as saved
in `certbot.configuration.NamespaceConfig.config_dir`."""
SSL_DHPARAMS_SRC = pkg_resources.resource_filename(
"certbot", "ssl-dhparams.pem")
"""Path to the nginx ssl_dhparams file found in the Certbot distribution."""
UPDATED_SSL_DHPARAMS_DIGEST = ".updated-ssl-dhparams-pem-digest.txt"
"""Name of the hash of the updated or informed ssl_dhparams as saved
in `certbot.configuration.NamespaceConfig.config_dir`."""
ALL_SSL_DHPARAMS_HASHES = [
'9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',
]
"""SHA256 hashes of the contents of all versions of SSL_DHPARAMS_SRC"""
| [] | [] | ["XDG_CONFIG_HOME"] | [] | ["XDG_CONFIG_HOME"] | python | 1 | 0 | |
src/specs/lifecycle/lifecycle_test.go | package lifecycle_test
import (
"log"
"os"
"os/exec"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
func boshStartAll() bool {
log.Println()
session, err := runBoshCommand("start")
Expect(err).NotTo(HaveOccurred())
session.Wait("5m")
log.Println()
return session.ExitCode() == 0
}
func runBoshCommand(args ...string) (*gexec.Session, error) {
defaultArgs := []string{
"--non-interactive",
"--deployment=" + os.Getenv("BOSH_DEPLOYMENT"),
}
cmd := exec.Command("bosh",
append(
defaultArgs,
args...,
)...,
)
log.Printf("$ %s", strings.Join(cmd.Args, " "))
return gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
}
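// For illustration (not part of the original file): with BOSH_DEPLOYMENT set
// to the placeholder value "my-deployment", runBoshCommand("stop") invokes roughly
//
//	bosh --non-interactive --deployment=my-deployment stop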
var _ = Describe("Streaming MySQL Backup Tool Lifecycle", func() {
When("streaming-mysql-backup-tool is shutdown", func() {
BeforeEach(func() {
shutdownBackupTool, err := runBoshCommand(
"ssh",
"mysql/0",
"-c",
"sudo /var/vcap/bosh/bin/monit stop streaming-mysql-backup-tool",
)
Expect(err).NotTo(HaveOccurred())
Eventually(shutdownBackupTool, "5m").
Should(gexec.Exit(0),
"Expected monit to stop streaming-mysql-backup-tool")
})
AfterEach(func() {
Expect(boshStartAll()).To(BeTrue())
})
// We can remove this test/test suite when mysql-backup-release has moved to BPM
It("removes its PID file", func() {
Eventually(checkPidFileIsGone, "30s", "2s").
Should(BeTrue(),
"Expected streaming-mysql-backup-tool pid file to be removed but it was not")
})
})
})
func checkPidFileIsGone() bool {
checkPidFile, err := runBoshCommand(
"ssh",
"mysql/0",
"-c",
"! [[ -e /var/vcap/sys/run/streaming-mysql-backup-tool/streaming-mysql-backup-tool.pid ]]",
)
Expect(err).NotTo(HaveOccurred())
checkPidFile.Wait("5m")
return checkPidFile.ExitCode() == 0
}
| ["\"BOSH_DEPLOYMENT\""] | [] | ["BOSH_DEPLOYMENT"] | [] | ["BOSH_DEPLOYMENT"] | go | 1 | 0 | |
examples/suppression/DeleteSpecificInvalidEmail.java | import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sendgrid.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
//////////////////////////////////////////////////////////////////
// Delete a specific invalid email
// DELETE /suppression/invalid_emails/{email}
public class DeleteSpecificInvalidEmail {
public static void main(String[] args) throws IOException {
try {
SendGrid sg = new SendGrid(System.getenv("SENDGRID_API_KEY"));
Request request = new Request();
request.setMethod(Method.DELETE);
request.setEndpoint("suppression/invalid_emails/{email}");
Response response = sg.api(request);
System.out.println(response.getStatusCode());
System.out.println(response.getBody());
System.out.println(response.getHeaders());
} catch (IOException ex) {
throw ex;
}
}
} | ["\"SENDGRID_API_KEY\""] | [] | ["SENDGRID_API_KEY"] | [] | ["SENDGRID_API_KEY"] | java | 1 | 0 | |
internal/filetransfer/config.go | // Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package filetransfer
import (
"bytes"
"database/sql"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"unicode/utf8"
"github.com/moov-io/base/admin"
moovhttp "github.com/moov-io/base/http"
"github.com/go-kit/kit/log"
"github.com/gorilla/mux"
"gopkg.in/yaml.v2"
)
var (
devFileTransferType = os.Getenv("DEV_FILE_TRANSFER_TYPE")
)
type Repository interface {
GetConfigs() ([]*Config, error)
upsertConfig(cfg *Config) error
deleteConfig(routingNumber string) error
GetCutoffTimes() ([]*CutoffTime, error)
upsertCutoffTime(routingNumber string, cutoff int, loc *time.Location) error
deleteCutoffTime(routingNumber string) error
GetFTPConfigs() ([]*FTPConfig, error)
upsertFTPConfigs(routingNumber, host, user, pass string) error
deleteFTPConfig(routingNumber string) error
GetSFTPConfigs() ([]*SFTPConfig, error)
upsertSFTPConfigs(routingNumber, host, user, pass, privateKey, publicKey string) error
deleteSFTPConfig(routingNumber string) error
Close() error
}
func NewRepository(filepath string, db *sql.DB, dbType string) Repository {
if db == nil {
repo := &staticRepository{}
repo.populate()
return repo
}
// If we've got a config from a file on the filesystem let's use that
if filepath != "" {
repo, _ := readConfigFile(filepath)
return repo
}
sqliteRepo := &sqlRepository{db}
if strings.EqualFold(dbType, "mysql") {
// On 'mysql' database setups return the database-backed repository over the local (hardcoded) values.
return sqliteRepo
}
cutoffCount, ftpCount, fileTransferCount := sqliteRepo.GetCounts()
if (cutoffCount + ftpCount + fileTransferCount) == 0 {
repo := &staticRepository{}
repo.populate()
return repo
}
return sqliteRepo
}
type sqlRepository struct {
db *sql.DB
}
func (r *sqlRepository) Close() error {
return r.db.Close()
}
// GetCounts returns the count of CutoffTime's, FTPConfig's, and Config's in the sqlite database.
//
// This is used to return the static repository if the counts are empty (so local dev "just works").
func (r *sqlRepository) GetCounts() (int, int, int) {
count := func(table string) int {
query := fmt.Sprintf(`select count(*) from %s`, table)
stmt, err := r.db.Prepare(query)
if err != nil {
return 0
}
defer stmt.Close()
row := stmt.QueryRow()
var n int
row.Scan(&n)
return n
}
return count("cutoff_times"), count("ftp_configs"), count("file_transfer_configs")
}
func (r *sqlRepository) GetConfigs() ([]*Config, error) {
query := `select routing_number, inbound_path, outbound_path, return_path, outbound_filename_template, allowed_ips from file_transfer_configs;`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
var configs []*Config
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var cfg Config
if err := rows.Scan(&cfg.RoutingNumber, &cfg.InboundPath, &cfg.OutboundPath, &cfg.ReturnPath, &cfg.OutboundFilenameTemplate, &cfg.AllowedIPs); err != nil {
return nil, fmt.Errorf("GetConfigs: scan: %v", err)
}
configs = append(configs, &cfg)
}
return configs, rows.Err()
}
func (r *sqlRepository) GetCutoffTimes() ([]*CutoffTime, error) {
query := `select routing_number, cutoff, location from cutoff_times;`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
var times []*CutoffTime
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var cutoff CutoffTime
var loc string
if err := rows.Scan(&cutoff.RoutingNumber, &cutoff.Cutoff, &loc); err != nil {
return nil, fmt.Errorf("GetCutoffTimes: scan: %v", err)
}
if l, err := time.LoadLocation(loc); err != nil {
return nil, fmt.Errorf("GetCutoffTimes: parsing %q failed: %v", loc, err)
} else {
cutoff.Loc = l
}
times = append(times, &cutoff)
}
return times, rows.Err()
}
func exec(db *sql.DB, rawQuery string, args ...interface{}) error {
stmt, err := db.Prepare(rawQuery)
if err != nil {
return err
}
defer stmt.Close()
_, err = stmt.Exec(args...)
return err
}
func (r *sqlRepository) getOutboundFilenameTemplates() ([]string, error) {
query := `select outbound_filename_template from file_transfer_configs where outbound_filename_template <> '';`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
var templates []string
for rows.Next() {
var tmpl string
if err := rows.Scan(&tmpl); err != nil {
return nil, err
}
templates = append(templates, tmpl)
}
return templates, rows.Err()
}
func (r *sqlRepository) upsertConfig(cfg *Config) error {
query := `replace into file_transfer_configs (routing_number, inbound_path, outbound_path, return_path, outbound_filename_template) values (?, ?, ?, ?, ?);`
return exec(r.db, query, cfg.RoutingNumber, cfg.InboundPath, cfg.OutboundPath, cfg.ReturnPath, cfg.OutboundFilenameTemplate)
}
func (r *sqlRepository) deleteConfig(routingNumber string) error {
query := `delete from file_transfer_configs where routing_number = ?;`
return exec(r.db, query, routingNumber)
}
func (r *sqlRepository) upsertCutoffTime(routingNumber string, cutoff int, loc *time.Location) error {
query := `replace into cutoff_times (routing_number, cutoff, location) values (?, ?, ?);`
return exec(r.db, query, routingNumber, cutoff, loc.String())
}
func (r *sqlRepository) deleteCutoffTime(routingNumber string) error {
query := `delete from cutoff_times where routing_number = ?;`
return exec(r.db, query, routingNumber)
}
func (r *sqlRepository) GetFTPConfigs() ([]*FTPConfig, error) {
query := `select routing_number, hostname, username, password from ftp_configs;`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
var configs []*FTPConfig
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var cfg FTPConfig
if err := rows.Scan(&cfg.RoutingNumber, &cfg.Hostname, &cfg.Username, &cfg.Password); err != nil {
return nil, fmt.Errorf("GetFTPConfigs: scan: %v", err)
}
configs = append(configs, &cfg)
}
return configs, rows.Err()
}
func (r *sqlRepository) upsertFTPConfigs(routingNumber, host, user, pass string) error {
tx, err := r.db.Begin()
if err != nil {
return err
}
stmt, err := tx.Prepare(`select password from ftp_configs where routing_number = ? limit 1;`)
if err != nil {
return fmt.Errorf("error reading existing password: error=%v rollback=%v", err, tx.Rollback())
}
defer stmt.Close()
row := stmt.QueryRow(routingNumber)
var existingPass string
if err := row.Scan(&existingPass); err != nil {
return fmt.Errorf("error scanning existing password: error=%v rollback=%v", err, tx.Rollback())
}
if pass == "" {
pass = existingPass
}
query := `replace into ftp_configs (routing_number, hostname, username, password) values (?, ?, ?, ?);`
stmt, err = tx.Prepare(query)
if err != nil {
return fmt.Errorf("error preparing replace: error=%v rollback=%v", err, tx.Rollback())
}
defer stmt.Close()
if _, err := stmt.Exec(routingNumber, host, user, pass); err != nil {
return fmt.Errorf("error replacing ftp config error=%v rollback=%v", err, tx.Rollback())
}
return tx.Commit()
}
func (r *sqlRepository) deleteFTPConfig(routingNumber string) error {
query := `delete from ftp_configs where routing_number = ?;`
return exec(r.db, query, routingNumber)
}
func (r *sqlRepository) GetSFTPConfigs() ([]*SFTPConfig, error) {
query := `select routing_number, hostname, username, password, client_private_key, host_public_key from sftp_configs;`
stmt, err := r.db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
var configs []*SFTPConfig
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var cfg SFTPConfig
if err := rows.Scan(&cfg.RoutingNumber, &cfg.Hostname, &cfg.Username, &cfg.Password, &cfg.ClientPrivateKey, &cfg.HostPublicKey); err != nil {
return nil, fmt.Errorf("GetSFTPConfigs: scan: %v", err)
}
configs = append(configs, &cfg)
}
return configs, rows.Err()
}
func (r *sqlRepository) upsertSFTPConfigs(routingNumber, host, user, pass, privateKey, publicKey string) error {
tx, err := r.db.Begin()
if err != nil {
return err
}
query := `select password, client_private_key, host_public_key from sftp_configs where routing_number = ? limit 1;`
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("error preparing read: error=%v rollback=%v", err, tx.Rollback())
}
defer stmt.Close()
// read existing values
ePass, ePriv, ePub := "", "", ""
if err := stmt.QueryRow(routingNumber).Scan(&ePass, &ePriv, &ePub); err != nil {
return fmt.Errorf("error reading existing: error=%v rollback=%v", err, tx.Rollback())
}
if pass == "" {
pass = ePass
}
if privateKey == "" {
privateKey = ePriv
}
if publicKey == "" {
publicKey = ePub
}
// update/insert entire row
query = `replace into sftp_configs (routing_number, hostname, username, password, client_private_key, host_public_key) values (?, ?, ?, ?, ?, ?);`
stmt, err = tx.Prepare(query)
if err != nil {
return fmt.Errorf("error preparing replace: error=%v rollback=%v", err, tx.Rollback())
}
defer stmt.Close()
if _, err := stmt.Exec(routingNumber, host, user, pass, privateKey, publicKey); err != nil {
return fmt.Errorf("error executing repalce: error=%v rollback=%v", err, tx.Rollback())
}
return tx.Commit()
}
func (r *sqlRepository) deleteSFTPConfig(routingNumber string) error {
query := `delete from sftp_configs where routing_number = ?;`
return exec(r.db, query, routingNumber)
}
func readConfigFile(path string) (Repository, error) {
bs, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
type wrapper struct {
FileTransfer struct {
Configs []*Config `yaml:"configs"`
CutoffTimes []*CutoffTime `yaml:"cutoffTimes"`
FTPConfigs []*FTPConfig `yaml:"ftpConfigs"`
SFTPConfigs []*SFTPConfig `yaml:"sftpConfigs"`
} `yaml:"fileTransfer"`
}
var conf wrapper
if err := yaml.NewDecoder(bytes.NewReader(bs)).Decode(&conf); err != nil {
return nil, err
}
return &staticRepository{
configs: conf.FileTransfer.Configs,
cutoffTimes: conf.FileTransfer.CutoffTimes,
ftpConfigs: conf.FileTransfer.FTPConfigs,
sftpConfigs: conf.FileTransfer.SFTPConfigs,
protocol: devFileTransferType,
}, nil
}
type staticRepository struct {
configs []*Config
cutoffTimes []*CutoffTime
ftpConfigs []*FTPConfig
sftpConfigs []*SFTPConfig
// protocol represents values like ftp or sftp to return back relevant configs
// to the moov/fsftp or SFTP docker image
protocol string
}
func (r *staticRepository) populate() {
r.populateConfigs()
r.populateCutoffTimes()
switch strings.ToLower(r.protocol) {
case "", "ftp":
r.populateFTPConfigs()
case "sftp":
r.populateSFTPConfigs()
}
}
func (r *staticRepository) populateConfigs() {
cfg := &Config{RoutingNumber: "121042882"} // test value, matches apitest
switch strings.ToLower(r.protocol) {
case "", "ftp":
// For 'make start-ftp-server', configs match paygate's testdata/ftp-server/
cfg.InboundPath = "inbound/"
cfg.OutboundPath = "outbound/"
cfg.ReturnPath = "returned/"
case "sftp":
// For 'make start-sftp-server', configs match paygate's testdata/sftp-server/
cfg.InboundPath = "/upload/inbound/"
cfg.OutboundPath = "/upload/outbound/"
cfg.ReturnPath = "/upload/returned/"
}
r.configs = append(r.configs, cfg)
}
func (r *staticRepository) populateCutoffTimes() {
nyc, _ := time.LoadLocation("America/New_York")
r.cutoffTimes = append(r.cutoffTimes, &CutoffTime{
RoutingNumber: "121042882",
Cutoff: 1700,
Loc: nyc,
})
}
func (r *staticRepository) populateFTPConfigs() {
r.ftpConfigs = append(r.ftpConfigs, &FTPConfig{
RoutingNumber: "121042882",
Hostname: "localhost:2121", // below configs for moov/fsftp:v0.1.0
Username: "admin",
Password: "123456",
})
}
func (r *staticRepository) populateSFTPConfigs() {
r.sftpConfigs = append(r.sftpConfigs, &SFTPConfig{
RoutingNumber: "121042882",
Hostname: "localhost:2222", // below configs for atmoz/sftp:latest
Username: "demo",
Password: "password",
// ClientPrivateKey: "...", // Base64 encoded or PEM format
})
}
func (r *staticRepository) GetConfigs() ([]*Config, error) {
return r.configs, nil
}
func (r *staticRepository) GetCutoffTimes() ([]*CutoffTime, error) {
return r.cutoffTimes, nil
}
func (r *staticRepository) GetFTPConfigs() ([]*FTPConfig, error) {
return r.ftpConfigs, nil
}
func (r *staticRepository) GetSFTPConfigs() ([]*SFTPConfig, error) {
return r.sftpConfigs, nil
}
func (r *staticRepository) Close() error {
return nil
}
func (r *staticRepository) upsertConfig(cfg *Config) error {
return nil
}
func (r *staticRepository) deleteConfig(routingNumber string) error {
return nil
}
func (r *staticRepository) upsertCutoffTime(routingNumber string, cutoff int, loc *time.Location) error {
return nil
}
func (r *staticRepository) deleteCutoffTime(routingNumber string) error {
return nil
}
func (r *staticRepository) upsertFTPConfigs(routingNumber, host, user, pass string) error {
return nil
}
func (r *staticRepository) deleteFTPConfig(routingNumber string) error {
return nil
}
func (r *staticRepository) upsertSFTPConfigs(routingNumber, host, user, pass, privateKey, publicKey string) error {
return nil
}
func (r *staticRepository) deleteSFTPConfig(routingNumber string) error {
return nil
}
// AddFileTransferConfigRoutes registers the admin HTTP routes for modifying file-transfer (uploading) configs.
func AddFileTransferConfigRoutes(logger log.Logger, svc *admin.Server, repo Repository) {
svc.AddHandler("/configs/uploads", GetConfigs(logger, repo))
svc.AddHandler("/configs/uploads/cutoff-times/{routingNumber}", manageCutoffTimeConfig(logger, repo))
svc.AddHandler("/configs/uploads/file-transfers/{routingNumber}", manageFileTransferConfig(logger, repo))
svc.AddHandler("/configs/uploads/ftp/{routingNumber}", manageFTPConfig(logger, repo))
svc.AddHandler("/configs/uploads/sftp/{routingNumber}", manageSFTPConfig(logger, repo))
}
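// Illustrative request (not part of the original file; the routing number and
// values are placeholders): updating a cutoff time through the admin server
// could look like
//
//	PUT /configs/uploads/cutoff-times/121042882
//	{"cutoff": 1700, "location": "America/New_York"}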
func getRoutingNumber(r *http.Request) string {
rtn, ok := mux.Vars(r)["routingNumber"]
if !ok {
return ""
}
return rtn
}
type adminConfigResponse struct {
CutoffTimes []*CutoffTime `json:"CutoffTimes"`
FileTransferConfigs []*Config `json:"Configs"`
FTPConfigs []*FTPConfig `json:"FTPConfigs"`
SFTPConfigs []*SFTPConfig `json:"SFTPConfigs"`
}
// GetConfigs returns all configurations (i.e. FTP, cutoff times, file-transfer configs with passwords masked. (e.g. 'p******d')
func GetConfigs(logger log.Logger, repo Repository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
moovhttp.Problem(w, fmt.Errorf("unsupported HTTP verb %s", r.Method))
return
}
resp := &adminConfigResponse{}
if v, err := repo.GetCutoffTimes(); err != nil {
moovhttp.Problem(w, err)
return
} else {
resp.CutoffTimes = v
}
if v, err := repo.GetConfigs(); err != nil {
moovhttp.Problem(w, err)
return
} else {
resp.FileTransferConfigs = v
}
if v, err := repo.GetFTPConfigs(); err != nil {
moovhttp.Problem(w, err)
return
} else {
resp.FTPConfigs = maskFTPPasswords(v)
}
if v, err := repo.GetSFTPConfigs(); err != nil {
moovhttp.Problem(w, err)
return
} else {
resp.SFTPConfigs = maskSFTPPasswords(v)
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(resp)
}
}
func maskPassword(s string) string {
if utf8.RuneCountInString(s) < 3 {
return "**" // too short, we can't mask anything
} else {
// turn 'password' into 'p******d'
first, last := s[0:1], s[len(s)-1:]
return fmt.Sprintf("%s%s%s", first, strings.Repeat("*", len(s)-2), last)
}
}
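// For example (not part of the original file):
//
//	maskPassword("hunter2") // returns "h*****2"
//	maskPassword("pw")      // returns "**"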
func maskFTPPasswords(cfgs []*FTPConfig) []*FTPConfig {
for i := range cfgs {
cfgs[i].Password = maskPassword(cfgs[i].Password)
}
return cfgs
}
func maskSFTPPasswords(cfgs []*SFTPConfig) []*SFTPConfig {
for i := range cfgs {
cfgs[i].Password = maskPassword(cfgs[i].Password)
}
return cfgs
}
func manageCutoffTimeConfig(logger log.Logger, repo Repository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
routingNumber := getRoutingNumber(r)
if routingNumber == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
switch r.Method {
case "PUT":
type request struct {
Cutoff int `json:"cutoff"`
Location string `json:"location"`
}
var req request
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
moovhttp.Problem(w, err)
return
}
if req.Cutoff == 0 {
moovhttp.Problem(w, errors.New("misisng cutoff"))
return
}
loc, err := time.LoadLocation(req.Location)
if err != nil {
moovhttp.Problem(w, fmt.Errorf("time: %s: %v", req.Location, err))
return
}
if err := repo.upsertCutoffTime(routingNumber, req.Cutoff, loc); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("updating cutoff time config routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
case "DELETE":
if err := repo.deleteCutoffTime(routingNumber); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("deleting cutoff time config routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
default:
moovhttp.Problem(w, fmt.Errorf("cutoff-times: unsupported HTTP verb %s", r.Method))
return
}
w.WriteHeader(http.StatusOK)
}
}
func manageFileTransferConfig(logger log.Logger, repo Repository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
routingNumber := getRoutingNumber(r)
if routingNumber == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
switch r.Method {
case "PUT":
type request struct {
InboundPath string `json:"inboundPath,omitempty"`
OutboundPath string `json:"outboundPath,omitempty"`
ReturnPath string `json:"returnPath,omitempty"`
OutboundFilenameTemplate string `json:"outboundFilenameTemplate,omitempty"`
}
var req request
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
moovhttp.Problem(w, err)
return
}
// Ensure that a provided template validates before saving it
if req.OutboundFilenameTemplate != "" {
if err := validateTemplate(req.OutboundFilenameTemplate); err != nil {
moovhttp.Problem(w, err)
return
}
}
err := repo.upsertConfig(&Config{
RoutingNumber: routingNumber,
InboundPath: req.InboundPath,
OutboundPath: req.OutboundPath,
ReturnPath: req.ReturnPath,
OutboundFilenameTemplate: req.OutboundFilenameTemplate,
})
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("updated config for routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
w.WriteHeader(http.StatusOK)
case "DELETE":
if err := repo.deleteConfig(routingNumber); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("deleted config for routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
w.WriteHeader(http.StatusOK)
default:
moovhttp.Problem(w, fmt.Errorf("unsupported HTTP verb %s", r.Method))
return
}
}
}
func manageFTPConfig(logger log.Logger, repo Repository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
routingNumber := getRoutingNumber(r)
if routingNumber == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
switch r.Method {
case "PUT":
type request struct {
Hostname string `json:"hostname"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
}
var req request
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
moovhttp.Problem(w, err)
return
}
if req.Hostname == "" || req.Username == "" {
moovhttp.Problem(w, errors.New("missing hostname, or username"))
return
}
if err := repo.upsertFTPConfigs(routingNumber, req.Hostname, req.Username, req.Password); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("updating FTP configs routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
case "DELETE":
if err := repo.deleteFTPConfig(routingNumber); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("deleting FTP config routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
default:
moovhttp.Problem(w, fmt.Errorf("unsupported HTTP verb %s", r.Method))
return
}
w.WriteHeader(http.StatusOK)
}
}
func manageSFTPConfig(logger log.Logger, repo Repository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
routingNumber := getRoutingNumber(r)
if routingNumber == "" {
w.WriteHeader(http.StatusBadRequest)
return
}
switch r.Method {
case "PUT":
type request struct {
Hostname string `json:"hostname"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
ClientPrivateKey string `json:"clientPrivateKey,omitempty"`
HostPublicKey string `json:"hostPublicKey,omitempty"`
}
var req request
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
moovhttp.Problem(w, err)
return
}
if req.Hostname == "" || req.Username == "" {
moovhttp.Problem(w, errors.New("missing hostname, or username"))
return
}
if err := repo.upsertSFTPConfigs(routingNumber, req.Hostname, req.Username, req.Password, req.ClientPrivateKey, req.HostPublicKey); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("updating SFTP config routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
case "DELETE":
if err := repo.deleteSFTPConfig(routingNumber); err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("file-transfer-configs", fmt.Sprintf("deleting SFTP cofnig routingNumber=%s", routingNumber), "requestID", moovhttp.GetRequestID(r))
default:
moovhttp.Problem(w, fmt.Errorf("unsupported HTTP verb %s", r.Method))
return
}
w.WriteHeader(http.StatusOK)
}
}
| ["\"DEV_FILE_TRANSFER_TYPE\""] | [] | ["DEV_FILE_TRANSFER_TYPE"] | [] | ["DEV_FILE_TRANSFER_TYPE"] | go | 1 | 0 | |
jmx/jmx.go | /*
Package jmx is a library to get metrics through JMX. It requires additional
setup. Read https://github.com/newrelic/infra-integrations-sdk#jmx-support for
instructions. */
package jmx
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/newrelic/infra-integrations-sdk/v4/log"
)
const (
jmxLineInitialBuffer = 4 * 1024 // initial 4KB per line, it'll be increased when required
cmdStdChanLen = 1000
)
// Error vars to ease Query response handling.
var (
ErrBeanPattern = errors.New("cannot parse MBean glob pattern, valid: 'DOMAIN:BEAN'")
ErrConnection = jmxClientError("jmx endpoint connection error")
// ErrJmxCmdRunning is the error returned when Open is called while the nrjmx command is still running
ErrJmxCmdRunning = errors.New("JMX tool is already running")
)
var cmd *exec.Cmd
var cancel context.CancelFunc
var cmdOut io.ReadCloser
var cmdError io.ReadCloser
var cmdIn io.WriteCloser
var cmdErrC = make(chan error, cmdStdChanLen)
var cmdWarnC = make(chan string, cmdStdChanLen)
var done sync.WaitGroup
// jmxClientError error is returned when the nrjmx tool can not continue
type jmxClientError string
func (j jmxClientError) Error() string {
return string(j)
}
// IsJmxClientError identify if the error is jmx client error type
func IsJmxClientError(err error) bool {
_, ok := err.(jmxClientError)
return ok
}
// connectionConfig is the configuration for the nrjmx command.
type connectionConfig struct {
connectionURL string
hostname string
port string
uriPath string
username string
password string
keyStore string
keyStorePassword string
trustStore string
trustStorePassword string
remote bool
remoteJBossStandalone bool
executablePath string
verbose bool
}
func (cfg *connectionConfig) isSSL() bool {
return cfg.keyStore != "" && cfg.keyStorePassword != "" && cfg.trustStore != "" && cfg.trustStorePassword != ""
}
func (cfg *connectionConfig) command() []string {
var c []string
if os.Getenv("NR_JMX_TOOL") != "" {
c = strings.Split(os.Getenv("NR_JMX_TOOL"), " ")
} else {
c = []string{cfg.executablePath}
}
if cfg.connectionURL != "" {
c = append(c, "--connURL", cfg.connectionURL)
} else {
c = append(c, "--hostname", cfg.hostname, "--port", cfg.port)
if cfg.uriPath != "" {
c = append(c, "--uriPath", cfg.uriPath)
}
if cfg.remote {
c = append(c, "--remote")
}
if cfg.remoteJBossStandalone {
c = append(c, "--remoteJBossStandalone")
}
}
if cfg.username != "" && cfg.password != "" {
c = append(c, "--username", cfg.username, "--password", cfg.password)
}
if cfg.isSSL() {
c = append(c, "--keyStore", cfg.keyStore, "--keyStorePassword", cfg.keyStorePassword, "--trustStore", cfg.trustStore, "--trustStorePassword", cfg.trustStorePassword)
}
if cfg.verbose {
c = append(c, "--verbose")
}
return c
}
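// For illustration (not part of the original file): with only hostname
// "localhost" and port "9999" set and NR_JMX_TOOL unset, command() yields the
// default executable followed by
//
//	--hostname localhost --port 9999
//
// Credential, SSL, remote, and verbose flags are appended only when the
// corresponding fields are non-empty or true.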
// OpenNoAuth executes a nrjmx command without user/pass using the given options.
func OpenNoAuth(hostname, port string, opts ...Option) error {
return Open(hostname, port, "", "", opts...)
}
// OpenURL executes a nrjmx command using the provided full connection URL and options.
func OpenURL(connectionURL, username, password string, opts ...Option) error {
opts = append(opts, WithConnectionURL(connectionURL))
return Open("", "", username, password, opts...)
}
// Open executes a nrjmx command using the given options.
func Open(hostname, port, username, password string, opts ...Option) error {
config := &connectionConfig{
hostname: hostname,
port: port,
username: username,
password: password,
executablePath: filepath.Clean(defaultNrjmxExec),
}
for _, opt := range opts {
opt(config)
}
return openConnection(config)
}
// Option sets an option on integration level.
type Option func(config *connectionConfig)
// WithNrJmxTool for specifying a non-standard `nrjmx` tool executable location.
// It has lower precedence than the `NR_JMX_TOOL` environment variable.
func WithNrJmxTool(executablePath string) Option {
return func(config *connectionConfig) {
config.executablePath = executablePath
}
}
// WithURIPath for specifying a non-standard path (default: jmxrmi) on the JMX service URI
func WithURIPath(uriPath string) Option {
return func(config *connectionConfig) {
config.uriPath = uriPath
}
}
// WithConnectionURL for specifying the full JMX service connection URL
func WithConnectionURL(connectionURL string) Option {
return func(config *connectionConfig) {
config.connectionURL = connectionURL
}
}
// WithVerbose enables verbose mode for nrjmx.
func WithVerbose() Option {
return func(config *connectionConfig) {
config.verbose = true
}
}
// WithSSL for SSL connection configuration.
func WithSSL(keyStore, keyStorePassword, trustStore, trustStorePassword string) Option {
return func(config *connectionConfig) {
config.keyStore = keyStore
config.keyStorePassword = keyStorePassword
config.trustStore = trustStore
config.trustStorePassword = trustStorePassword
}
}
// WithRemoteProtocol uses the remote JMX protocol URL (by default on JBoss Domain-mode).
func WithRemoteProtocol() Option {
return func(config *connectionConfig) {
config.remote = true
}
}
// WithRemoteStandAloneJBoss uses the remote JMX protocol URL on JBoss Standalone-mode.
func WithRemoteStandAloneJBoss() Option {
return func(config *connectionConfig) {
config.remote = true
config.remoteJBossStandalone = true
}
}
func openConnection(config *connectionConfig) (err error) {
if cmd != nil {
return ErrJmxCmdRunning
}
// Recreate the error and warning channels so errors from previous runs are not surfaced again
cmdErrC = make(chan error, cmdStdChanLen)
cmdWarnC = make(chan string, cmdStdChanLen)
done.Add(1)
var ctx context.Context
cliCommand := config.command()
ctx, cancel = context.WithCancel(context.Background())
cmd = exec.CommandContext(ctx, cliCommand[0], cliCommand[1:]...)
if cmdOut, err = cmd.StdoutPipe(); err != nil {
return err
}
if cmdIn, err = cmd.StdinPipe(); err != nil {
return err
}
if cmdError, err = cmd.StderrPipe(); err != nil {
return err
}
go handleStdErr(ctx)
if err = cmd.Start(); err != nil {
return err
}
go func() {
if err = cmd.Wait(); err != nil {
cmdErrC <- jmxClientError(fmt.Sprintf("nrjmx error: %s [proc-state: %s]", err, cmd.ProcessState))
}
cmd = nil
done.Done()
}()
return nil
}
func handleStdErr(ctx context.Context) {
scanner := bufio.NewReaderSize(cmdError, jmxLineInitialBuffer)
var line string
var err error
for {
select {
case <-ctx.Done():
return
default:
break
}
line, err = scanner.ReadString('\n')
// Ignore EOF and "file already closed" errors so stderr can be fully read before the pipe closes
if err != nil && err != io.EOF && !strings.Contains(err.Error(), "file already closed") {
log.Error(fmt.Sprintf("error reading stderr from JMX tool: %s", err.Error()))
}
if strings.HasPrefix(line, "WARNING") {
msg := line[7:]
if strings.Contains(msg, "Can't parse bean name") {
cmdErrC <- fmt.Errorf("%w: %s", ErrBeanPattern, msg)
return
}
cmdWarnC <- msg
}
if strings.HasPrefix(line, "SEVERE:") {
msg := line[7:]
if strings.Contains(msg, "jmx connection error") {
cmdErrC <- fmt.Errorf("%w: %s", ErrConnection, msg)
} else {
cmdErrC <- errors.New(msg)
}
return
}
if err != nil {
cmdErrC <- err
return
}
}
}
// Close will finish the underlying nrjmx application by closing its standard
// input and canceling the execution afterwards to clean-up.
func Close() {
if cancel != nil {
cancel()
}
done.Wait()
}
func doQuery(ctx context.Context, out chan []byte, queryErrC chan error, queryString []byte) {
if _, err := cmdIn.Write(queryString); err != nil {
queryErrC <- fmt.Errorf("writing nrjmx stdin: %s", err.Error())
return
}
scanner := bufio.NewReaderSize(cmdOut, jmxLineInitialBuffer)
var b []byte
var err error
for {
select {
case <-ctx.Done():
return
default:
break
}
b, err = scanner.ReadBytes('\n')
if err != nil && err != io.EOF {
queryErrC <- fmt.Errorf("reading nrjmx stdout: %s", err.Error())
}
out <- b
return
}
}
// Query executes JMX query against nrjmx tool waiting up to timeout (in milliseconds)
func Query(objectPattern string, timeoutMillis int) (result map[string]interface{}, err error) {
ctx, cancelFn := context.WithCancel(context.Background())
lineCh := make(chan []byte, cmdStdChanLen)
queryErrors := make(chan error, cmdStdChanLen)
outTimeout := time.Duration(timeoutMillis) * time.Millisecond
// Send the query async to the underlying process so we can timeout it
go doQuery(ctx, lineCh, queryErrors, []byte(fmt.Sprintf("%s\n", objectPattern)))
return receiveResult(lineCh, queryErrors, cancelFn, objectPattern, outTimeout)
}
// receiveResult checks for channels to receive result from nrjmx command.
func receiveResult(lineC chan []byte, queryErrC chan error, cancelFn context.CancelFunc, objectPattern string, timeout time.Duration) (result map[string]interface{}, err error) {
defer logAvailableWarnings(cmdWarnC)
var warn string
for {
select {
case line := <-lineC:
if len(line) == 0 {
cancelFn()
log.Warn(fmt.Sprintf("empty result for query: %s", objectPattern))
continue
}
var r map[string]interface{}
if err = json.Unmarshal(line, &r); err != nil {
err = fmt.Errorf("invalid return value for query: %s, error: %w, line: %q", objectPattern, err, line)
return
}
if result == nil {
result = make(map[string]interface{})
}
for k, v := range r {
result[k] = v
}
return
case warn = <-cmdWarnC:
log.Warn(warn)
case err = <-cmdErrC:
return
case err = <-queryErrC:
return
case <-time.After(timeout):
// In case of timeout, we want to close the command to avoid mixing up results coming up later
cancelFn()
Close()
err = fmt.Errorf("timeout waiting for query: %s", objectPattern)
return
}
}
}
func logAvailableWarnings(channel chan string) {
var warn string
for {
select {
case warn = <-channel:
log.Warn(warn)
default:
return
}
}
}
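// Illustrative usage of this package (not part of the original file; the
// hostname, port, and MBean pattern below are placeholders):
//
//	if err := jmx.OpenNoAuth("localhost", "9999"); err != nil {
//		log.Error(err.Error())
//		return
//	}
//	defer jmx.Close()
//	result, err := jmx.Query("java.lang:type=Memory", 10000)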
| ["\"NR_JMX_TOOL\"", "\"NR_JMX_TOOL\""] | [] | ["NR_JMX_TOOL"] | [] | ["NR_JMX_TOOL"] | go | 1 | 0 | |
aioworker/__init__.py | from .worker import Worker # noqa: F401
__all__ = ["Worker"]
__version__ = "0.2.0"
| [] | [] | [] | [] | [] | python | null | null | null |
providers/dns/dynu/dynu_test.go | package dynu
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var (
dynuLiveTest bool
dynuCustomerName string
dynuUserName string
dynuPassword string
dynuDomain string
)
func init() {
dynuCustomerName = os.Getenv("DYNU_CUSTOMER_NAME")
dynuUserName = os.Getenv("DYNU_USER_NAME")
dynuPassword = os.Getenv("DYNU_PASSWORD")
dynuDomain = os.Getenv("DYNU_DOMAIN")
if len(dynuCustomerName) > 0 && len(dynuUserName) > 0 && len(dynuPassword) > 0 && len(dynuDomain) > 0 {
dynuLiveTest = true
}
}
func TestLiveDynPresent(t *testing.T) {
if !dynuLiveTest {
t.Skip("skipping live test")
}
provider, err := NewDNSProvider()
assert.NoError(t, err)
err = provider.Present(dynuDomain, "", "123d==")
assert.NoError(t, err)
}
func TestLiveDynCleanUp(t *testing.T) {
if !dynuLiveTest {
t.Skip("skipping live test")
}
time.Sleep(time.Second * 1)
provider, err := NewDNSProvider()
assert.NoError(t, err)
err = provider.CleanUp(dynuDomain, "", "123d==")
assert.NoError(t, err)
}
| ["\"DYNU_CUSTOMER_NAME\"", "\"DYNU_USER_NAME\"", "\"DYNU_PASSWORD\"", "\"DYNU_DOMAIN\""] | [] | ["DYNU_PASSWORD", "DYNU_CUSTOMER_NAME", "DYNU_USER_NAME", "DYNU_DOMAIN"] | [] | ["DYNU_PASSWORD", "DYNU_CUSTOMER_NAME", "DYNU_USER_NAME", "DYNU_DOMAIN"] | go | 4 | 0 | |
reflector.go | package main
import (
"flag"
"log"
"net/http"
"os"
)
func main() {
var port string
if os.Getenv("HTTP_PLATFORM_PORT") != "" {
port = os.Getenv("HTTP_PLATFORM_PORT")
}
if port == "" {
flag.StringVar(&port, "p", "8880", "Port to listen on. Default port: 8880")
flag.Parse()
}
if port == "" {
log.Fatal("error - can not get listening port details\n")
}
http.HandleFunc("/", reflectHandler)
err := http.ListenAndServe(":"+port, nil)
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
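// reflectHandler is referenced in main but not included in this snippet; the
// minimal stand-in below is a hypothetical sketch that echoes the request
// line and headers back to the client, using only the imports already present.
func reflectHandler(w http.ResponseWriter, r *http.Request) {
// Echo the request line first.
w.Write([]byte(r.Method + " " + r.URL.String() + " " + r.Proto + "\n"))
// Then echo each request header.
for name, values := range r.Header {
for _, v := range values {
w.Write([]byte(name + ": " + v + "\n"))
}
}
}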
| ["\"HTTP_PLATFORM_PORT\"", "\"HTTP_PLATFORM_PORT\""] | [] | ["HTTP_PLATFORM_PORT"] | [] | ["HTTP_PLATFORM_PORT"] | go | 1 | 0 | |
venv/Lib/site-packages/pip/_internal/download.py | from __future__ import absolute_import
import cgi
import email.utils
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
from pip._vendor import requests, urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.utils import get_netrc_auth
# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import
from pip._vendor.six.moves import xmlrpc_client # type: ignore
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip._internal.exceptions import HashMismatch, InstallationError
from pip._internal.models.index import PyPI
# Import ssl from compat so the initial import occurs in only one place.
from pip._internal.utils.compat import HAS_TLS, ssl
from pip._internal.utils.encoding import auto_decode
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.glibc import libc_ver
from pip._internal.utils.marker_files import write_delete_marker_file
from pip._internal.utils.misc import (
ARCHIVE_EXTENSIONS, ask, ask_input, ask_password, ask_path_exists,
backup_dir, consume, display_path, format_size, get_installed_version,
path_to_url, remove_auth_from_url, rmtree, split_auth_netloc_from_url,
splitext, unpack_file,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import DownloadProgressProvider
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Optional, Tuple, Dict, IO, Text, Union
)
from optparse import Values
from pip._internal.models.link import Link
from pip._internal.utils.hashes import Hashes
from pip._internal.vcs.versioncontrol import AuthInfo, VersionControl
Credentials = Tuple[str, str, str]
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url',
'parse_content_disposition', 'sanitize_content_filename']
logger = logging.getLogger(__name__)
try:
import keyring # noqa
except ImportError:
keyring = None
except Exception as exc:
logger.warning("Keyring is skipped due to an exception: %s",
str(exc))
keyring = None
# These are environment variables present when running under various
# CI systems. For each variable, some CI systems that use the variable
# are indicated. The collection was chosen so that for each of a number
# of popular systems, at least one of the environment variables is used.
# This list is used to provide some indication of and lower bound for
# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
# For more background, see: https://github.com/pypa/pip/issues/5499
CI_ENVIRONMENT_VARIABLES = (
# Azure Pipelines
'BUILD_BUILDID',
# Jenkins
'BUILD_ID',
# AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
'CI',
# Explicit environment variable.
'PIP_IS_CI',
)
def looks_like_ci():
# type: () -> bool
"""
Return whether it looks like pip is running under CI.
"""
# We don't use the method of checking for a tty (e.g. using isatty())
# because some CI systems mimic a tty (e.g. Travis CI). Thus that
# method doesn't provide definitive information in either direction.
return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
if HAS_TLS:
data["openssl_version"] = ssl.OPENSSL_VERSION
setuptools_version = get_installed_version("setuptools")
if setuptools_version is not None:
data["setuptools_version"] = setuptools_version
# Use None rather than False so as not to give the impression that
# pip knows it is not being run under CI. Rather, it is a null or
# inconclusive result. Also, we include some value rather than no
# value to make it easier to know that the check has been run.
data["ci"] = True if looks_like_ci() else None
user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
if user_data is not None:
data["user_data"] = user_data
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
def _get_keyring_auth(url, username):
"""Return the tuple auth for a given url from keyring."""
if not url or not keyring:
return None
try:
try:
get_credential = keyring.get_credential
except AttributeError:
pass
else:
logger.debug("Getting credentials from keyring for %s", url)
cred = get_credential(url, username)
if cred is not None:
return cred.username, cred.password
return None
if username:
logger.debug("Getting password from keyring for %s", url)
password = keyring.get_password(url, username)
if password:
return username, password
except Exception as exc:
logger.warning("Keyring is skipped due to an exception: %s",
str(exc))
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True, index_urls=None):
# type: (bool, Optional[Values]) -> None
self.prompting = prompting
self.index_urls = index_urls
self.passwords = {} # type: Dict[str, AuthInfo]
# When the user is prompted to enter credentials and keyring is
# available, we will offer to save them. If the user accepts,
# this value is set to the credentials they entered. After the
# request authenticates, the caller should call
# ``save_credentials`` to save these.
self._credentials_to_save = None # type: Optional[Credentials]
def _get_index_url(self, url):
"""Return the original index URL matching the requested URL.
Cached or dynamically generated credentials may work against
the original index URL rather than just the netloc.
The provided url should have had its username and password
removed already. If the original index url had credentials then
they will be included in the return value.
Returns None if no matching index was found, or if --no-index
was specified by the user.
"""
if not url or not self.index_urls:
return None
for u in self.index_urls:
prefix = remove_auth_from_url(u).rstrip("/") + "/"
if url.startswith(prefix):
return u
def _get_new_credentials(self, original_url, allow_netrc=True,
allow_keyring=True):
"""Find and return credentials for the specified URL."""
# Split the credentials and netloc from the url.
url, netloc, url_user_password = split_auth_netloc_from_url(
original_url)
# Start with the credentials embedded in the url
username, password = url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in url for %s", netloc)
return url_user_password
# Find a matching index url for this request
index_url = self._get_index_url(url)
if index_url:
# Split the credentials from the url.
index_info = split_auth_netloc_from_url(index_url)
if index_info:
index_url, _, index_url_user_password = index_info
logger.debug("Found index url %s", index_url)
# If an index URL was found, try its embedded credentials
if index_url and index_url_user_password[0] is not None:
username, password = index_url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in index url for %s", netloc)
return index_url_user_password
# Get creds from netrc if we still don't have them
if allow_netrc:
netrc_auth = get_netrc_auth(original_url)
if netrc_auth:
logger.debug("Found credentials in netrc for %s", netloc)
return netrc_auth
# If we don't have a password and keyring is available, use it.
if allow_keyring:
# The index url is more specific than the netloc, so try it first
kr_auth = (_get_keyring_auth(index_url, username) or
_get_keyring_auth(netloc, username))
if kr_auth:
logger.debug("Found credentials in keyring for %s", netloc)
return kr_auth
return username, password
def _get_url_and_credentials(self, original_url):
"""Return the credentials to use for the provided URL.
If allowed, netrc and keyring may be used to obtain the
correct credentials.
Returns (url_without_credentials, username, password). Note
that even if the original URL contains credentials, this
function may return a different username and password.
"""
url, netloc, _ = split_auth_netloc_from_url(original_url)
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
if username is None and password is None:
# No stored credentials. Acquire new credentials without prompting
# the user. (e.g. from netrc, keyring, or the URL itself)
username, password = self._get_new_credentials(original_url)
if username is not None or password is not None:
# Convert the username and password if they're None, so that
# this netloc will show up as "cached" in the conditional above.
# Further, HTTPBasicAuth doesn't accept None, so it makes sense to
# cache the value that is going to be used.
username = username or ""
password = password or ""
# Store any acquired credentials.
self.passwords[netloc] = (username, password)
assert (
# Credentials were found
(username is not None and password is not None) or
# Credentials were not found
(username is None and password is None)
), "Could not load credentials from url: {}".format(original_url)
return url, username, password
def __call__(self, req):
# Get credentials for this request
url, username, password = self._get_url_and_credentials(req.url)
# Set the url of the request to the url without any credentials
req.url = url
if username is not None and password is not None:
# Send the basic auth with this request
req = HTTPBasicAuth(username, password)(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
# Factored out to allow for easy patching in tests
def _prompt_for_password(self, netloc):
username = ask_input("User for %s: " % netloc)
if not username:
return None, None, False
auth = _get_keyring_auth(netloc, username)
if auth:
return auth[0], auth[1], False
password = ask_password("Password: ")
return username, password, True
# Factored out to allow for easy patching in tests
def _should_save_password_to_keyring(self):
if not keyring:
return False
return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username, password, save = self._prompt_for_password(parsed.netloc)
# Store the new username and password to use for future requests
self._credentials_to_save = None
if username is not None and password is not None:
self.passwords[parsed.netloc] = (username, password)
# Prompt to save the password to keyring
if save and self._should_save_password_to_keyring():
self._credentials_to_save = (parsed.netloc, username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
req.register_hook("response", self.warn_on_401)
# On successful request, save the credentials that were used to
# keyring. (Note that if the user responded "no" above, this member
# is not set and nothing will be saved.)
if self._credentials_to_save:
req.register_hook("response", self.save_credentials)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def warn_on_401(self, resp, **kwargs):
"""Response callback to warn about incorrect credentials."""
if resp.status_code == 401:
logger.warning('401 Error, Credentials not correct for %s',
resp.request.url)
def save_credentials(self, resp, **kwargs):
"""Response callback to save credentials on success."""
assert keyring is not None, "should never reach here without keyring"
if not keyring:
return
creds = self._credentials_to_save
self._credentials_to_save = None
if creds and resp.status_code < 400:
try:
logger.info('Saving credentials to keyring')
keyring.set_password(*creds)
except Exception:
logger.exception('Failed to save credentials')
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
# is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None # type: Optional[int]
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
index_urls = kwargs.pop("index_urls", None)
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth(index_urls=index_urls)
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
# A 500 may indicate transient error in Amazon S3
# A 520 or 527 - may indicate transient error in CloudFlare
status_forcelist=[500, 503, 520, 527],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
# Save this for later use in add_insecure_host().
self._insecure_adapter = insecure_adapter
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.add_insecure_host(host)
def add_insecure_host(self, host):
# type: (str) -> None
self.mount('https://{}/'.format(host), self._insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
# type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text]
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
:param url: File path or url.
:param comes_from: Origin description of requirements.
:param session: Instance of pip.download.PipSession.
"""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
return resp.url, resp.text
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
# type: (Union[str, Text]) -> bool
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
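# Illustrative only:
#   is_url('https://example.com/pkg-1.0.tar.gz')  -> True
#   is_url('./downloads/pkg-1.0.tar.gz')          -> False  (no scheme present)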
def url_to_path(url):
# type: (str) -> str
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
if not netloc or netloc == 'localhost':
# According to RFC 8089, same as empty authority.
netloc = ''
elif sys.platform == 'win32':
# If we have a UNC path, prepend UNC share notation.
netloc = '\\\\' + netloc
else:
raise ValueError(
'non-local file URIs are not supported on this platform: %r'
% url
)
path = urllib_request.url2pathname(netloc + path)
return path
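# Illustrative only (on a POSIX platform):
#   url_to_path('file:///tmp/pkg-1.0.tar.gz')          -> '/tmp/pkg-1.0.tar.gz'
#   url_to_path('file://localhost/tmp/pkg-1.0.tar.gz') -> '/tmp/pkg-1.0.tar.gz'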
def is_archive_file(name):
# type: (str) -> bool
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location, url=link.url)
def _get_used_vcs_backend(link):
# type: (Link) -> Optional[VersionControl]
"""
Return a VersionControl object or None.
"""
for vcs_backend in vcs.backends:
if link.scheme in vcs_backend.schemes:
return vcs_backend
return None
def is_vcs_url(link):
# type: (Link) -> bool
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
# type: (Link) -> bool
return link.url.lower().startswith('file:')
def is_dir_url(link):
# type: (Link) -> bool
"""Return whether a file:// Link points to a directory.
``link`` must not have any other scheme but file://. Call is_file_url()
first.
"""
link_path = url_to_path(link.url_without_fragment)
return os.path.isdir(link_path)
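# Default no-op progress indicator: it passes the iterable through unchanged and
# is swapped for DownloadProgressProvider in _download_url only when a progress
# bar should actually be shown.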
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(
resp, # type: Response
link, # type: Link
content_file, # type: IO
hashes, # type: Optional[Hashes]
progress_bar # type: str
):
# type: (...) -> None
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
def written_chunks(chunks):
for chunk in chunks:
content_file.write(chunk)
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
progress_indicator = DownloadProgressProvider(progress_bar,
max=total_length)
if total_length:
logger.info("Downloading %s (%s)", url, format_size(total_length))
else:
logger.info("Downloading %s", url)
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:
consume(downloaded_chunks)
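# Copy an already-downloaded file into the requested download directory,
# prompting the user ((i)gnore, (w)ipe, (b)ackup, (a)bort) when a file with the
# same name already exists there.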
def _copy_file(filename, location, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
display_path(download_location), ('i', 'w', 'b', 'a'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
elif response == 'a':
sys.exit(-1)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(
link, # type: Link
location, # type: str
download_dir=None, # type: Optional[str]
session=None, # type: Optional[PipSession]
hashes=None, # type: Optional[Hashes]
progress_bar="on" # type: str
):
# type: (...) -> None
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
with TempDirectory(kind="unpack") as temp_dir:
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link,
session,
temp_dir.path,
hashes,
progress_bar)
# unpack the archive to the build dir location. even when only
# downloading archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
if not already_downloaded_path:
os.unlink(from_path)
def unpack_file_url(
link, # type: Link
location, # type: str
download_dir=None, # type: Optional[str]
hashes=None # type: Optional[Hashes]
):
# type: (...) -> None
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir.
"""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(link_path)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(
link, # type: Link
location, # type: str
download_dir=None, # type: Optional[str]
only_download=False, # type: bool
session=None, # type: Optional[PipSession]
hashes=None, # type: Optional[Hashes]
progress_bar="on" # type: str
):
# type: (...) -> None
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir, hashes=hashes)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
hashes=hashes,
progress_bar=progress_bar
)
if only_download:
write_delete_marker_file(location)
def sanitize_content_filename(filename):
# type: (str) -> str
"""
Sanitize the "filename" value from a Content-Disposition header.
"""
return os.path.basename(filename)
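# Illustrative only: a hostile value such as '../../etc/passwd' is reduced to
# its basename, 'passwd'.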
def parse_content_disposition(content_disposition, default_filename):
# type: (str, str) -> str
"""
Parse the "filename" value from a Content-Disposition header, and
return the default filename if the result is empty.
"""
_type, params = cgi.parse_header(content_disposition)
filename = params.get('filename')
if filename:
# We need to sanitize the filename to prevent directory traversal
# in case the filename contains ".." path parts.
filename = sanitize_content_filename(filename)
return filename or default_filename
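# Illustrative only:
#   parse_content_disposition('attachment; filename="pkg-1.0.tar.gz"', 'fallback.tar.gz')
#   -> 'pkg-1.0.tar.gz'  ('fallback.tar.gz' would be returned if no filename were present)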
def _download_http_url(
link, # type: Link
session, # type: PipSession
temp_dir, # type: str
hashes, # type: Optional[Hashes]
progress_bar # type: str
):
# type: (...) -> Tuple[str, str]
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition, filename)
ext = splitext(filename)[1] # type: Optional[str]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file, hashes, progress_bar)
return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
# type: (Link, str, Optional[Hashes]) -> Optional[str]
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| [] | [] | [
"PIP_USER_AGENT_USER_DATA"
] | [] | ["PIP_USER_AGENT_USER_DATA"] | python | 1 | 0 | |
appdata_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/massgrid/btcutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] | [] | [
"APPDATA",
"LOCALAPPDATA"
] | [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
args.go | package main
import (
"flag"
"os"
"github.com/pkg/errors"
)
type args struct {
user string
hostList string
scriptDir string
command string
pw bool
noPassword bool
verbose bool
}
func arguments() args {
var args args
flag.StringVar(&args.user, "user", os.Getenv("USER"), "ssh username")
flag.StringVar(&args.hostList, "hosts", "", "the list of hosts")
flag.StringVar(&args.scriptDir, "scripts", "", "the directory full of scripts")
flag.StringVar(&args.command, "command", "", "the command to run")
flag.BoolVar(&args.pw, "pw", false, "send password on stdin after running --command")
flag.BoolVar(&args.noPassword, "no-password", false, "no-password skips password prompt")
flag.BoolVar(&args.verbose, "verbose", false, "verbose mode")
flag.Parse()
return args
}
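// Illustrative invocation only (the binary name here is hypothetical):
//   ./runner -user alice -hosts hosts.txt -command "uptime"
// When -user is omitted it defaults to the $USER environment variable.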
func validate(args args) error {
if args.hostList == "" {
return errors.Errorf("--hosts is required")
}
if args.user == "" {
return errors.Errorf("--user or $USER must be set")
}
if args.scriptDir == "" && args.command == "" {
return errors.Errorf("--scripts or --command is required")
}
if args.scriptDir != "" && args.command != "" {
return errors.Errorf("only one of --scripts or --command allowed")
}
if args.command == "" && args.pw {
return errors.Errorf("--pw only allowed in conjunction with --command")
}
return nil
}
| [
"\"USER\""
] | [] | [
"USER"
] | [] | ["USER"] | go | 1 | 0 | |
functions/helloworld/hello_http_system_test.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
// Disabled until system tests are working on Kokoro.
// [START functions_http_system_test]
package helloworld
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"testing"
"time"
)
const RuntimeVersion = "go113"
func TestMain(m *testing.M) {
// Only run end-to-end tests when configured to do so.
if os.Getenv("GOLANG_SAMPLES_E2E_TEST") == "" {
log.Println("Skipping end-to-end tests: GOLANG_SAMPLES_E2E_TEST not set")
os.Exit(m.Run())
}
retn, err := setupAndRun(m)
if err != nil {
log.Fatal(err)
}
os.Exit(retn)
}
func setupAndRun(m *testing.M) (int, error) {
entryPoint := "HelloHTTP"
name := entryPoint + "-" + time.Now().Format("20060102-150405")
// Setup function for tests.
cmd := exec.Command("gcloud", "functions", "deploy", name,
"--entry-point="+entryPoint,
"--runtime="+RuntimeVersion,
"--allow-unauthenticated",
"--trigger-http",
)
log.Printf("Running: %s %s", cmd.Path, strings.Join(cmd.Args, " "))
if _, err := cmd.Output(); err != nil {
log.Println(string(err.(*exec.ExitError).Stderr))
return 1, fmt.Errorf("Setup: Deploy function: %w", err)
}
// Tear down the deployed function.
defer func() {
cmd = exec.Command("gcloud", "functions", "delete", name)
log.Printf("Running: %s %s", cmd.Path, strings.Join(cmd.Args, " "))
if _, err := cmd.Output(); err != nil {
log.Println(string(err.(*exec.ExitError).Stderr))
log.Printf("Teardown: Delete function: %v", err)
}
}()
// Retrieve URL for tests.
cmd = exec.Command("gcloud", "functions", "describe", name, "--format=value(httpsTrigger.url)")
log.Printf("Running: %s %s", cmd.Path, strings.Join(cmd.Args, " "))
out, err := cmd.Output()
if err != nil {
log.Println(string(err.(*exec.ExitError).Stderr))
return 1, fmt.Errorf("Setup: Get function URL: %w", err)
}
if err := os.Setenv("BASE_URL", strings.TrimSpace(string(out))); err != nil {
return 1, fmt.Errorf("Setup: os.Setenv: %w", err)
}
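// Illustrative only: the value stored in BASE_URL typically looks like
//   https://<region>-<project>.cloudfunctions.net/HelloHTTP-20060102-150405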
// Run the tests.
return m.Run(), nil
}
func TestHelloHTTPSystem(t *testing.T) {
client := http.Client{
Timeout: 10 * time.Second,
}
urlString := os.Getenv("BASE_URL")
testURL, err := url.Parse(urlString)
if err != nil {
t.Fatalf("url.Parse(%q): %v", urlString, err)
}
tests := []struct {
body string
want string
}{
{body: `{"name": ""}`, want: "Hello, World!"},
{body: `{"name": "Gopher"}`, want: "Hello, Gopher!"},
}
for _, test := range tests {
req := &http.Request{
Method: http.MethodPost,
Body: ioutil.NopCloser(strings.NewReader(test.body)),
URL: testURL,
}
resp, err := client.Do(req)
if err != nil {
t.Fatalf("HelloHTTP http.Get: %v", err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("HelloHTTP ioutil.ReadAll: %v", err)
}
if got := string(body); got != test.want {
t.Errorf("HelloHTTP(%q) = %q, want %q", test.body, got, test.want)
}
}
}
// [END functions_http_system_test]
| [
"\"GOLANG_SAMPLES_E2E_TEST\"",
"\"BASE_URL\""
] | [] | [
"GOLANG_SAMPLES_E2E_TEST",
"BASE_URL"
] | [] | ["GOLANG_SAMPLES_E2E_TEST", "BASE_URL"] | go | 2 | 0 | |
deployment/slick.example.wsgi | #!/usr/bin/python2.7
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
if os.path.exists(os.path.join(os.path.dirname(__file__), 'vpy', 'bin', 'activate_this.py')):
activate_this = os.path.join(os.path.dirname(__file__), 'vpy', 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
os.environ['SLICK_SETTINGS'] = os.path.join(os.path.dirname(__file__), 'prodserver.cfg')
os.chdir(os.path.dirname(__file__))
from slickqaweb.main import app as application
| [] | [] | [
"SLICK_SETTINGS"
] | [] | ["SLICK_SETTINGS"] | python | 1 | 0 | |
main.go | package main
import (
"bytes"
"flag"
"fmt"
"log"
"os"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/dustin/go-humanize"
"github.com/pyed/tailer"
"github.com/pyed/transmission"
"gopkg.in/telegram-bot-api.v4"
)
const (
VERSION = "v1.4.1"
HELP = `
*list* or *li* or *ls*
Lists all the torrents. Takes an optional query argument and lists only torrents whose tracker matches the query, or part of it.
*head* or *he*
Lists the first n torrents; n defaults to 5 if no argument is provided.
*tail* or *ta*
Lists the last n torrents; n defaults to 5 if no argument is provided.
*downs* or *dl*
Lists torrents with the status of _Downloading_ or in the queue to download.
*seeding* or *sd*
Lists torrents with the status of _Seeding_ or in the queue to seed.
*paused* or *pa*
Lists _Paused_ torrents.
*checking* or *ch*
Lists torrents with the status of _Verifying_ or in the queue to verify.
*active* or *ac*
Lists torrents that are actively uploading or downloading.
*errors* or *er*
Lists torrents with errors along with the error message.
*sort* or *so*
Manipulate the sorting of the aforementioned commands. Call it without arguments for more.
*trackers* or *tr*
Lists all the trackers along with the number of torrents.
*add* or *ad*
Takes one or many URLs or magnets to add them. You can send a ".torrent" file via Telegram to add it.
*search* or *se*
Takes a query and lists torrents with matching names.
*latest* or *la*
Lists the newest n torrents, n defaults to 5 if no argument is provided.
*info* or *in*
Takes one or more torrent's IDs to list more info about them.
*stop* or *sp*
Takes one or more torrent's IDs to stop them, or _all_ to stop all torrents.
*start* or *st*
Takes one or more torrent's IDs to start them, or _all_ to start all torrents.
*check* or *ck*
Takes one or more torrent's IDs to verify them, or _all_ to verify all torrents.
*del* or *rm*
Takes one or more torrent's IDs to delete them.
*deldata*
Takes one or more torrent's IDs to delete them and their data.
*stats* or *sa*
Shows Transmission's stats.
*speed* or *ss*
Shows the upload and download speeds.
*count* or *co*
Shows the torrent counts per status.
*help*
Shows this help message.
*version* or *ver*
Shows version numbers.
- Prefix commands with '/' if you want to talk to your bot in a group.
- report any issues [here](https://github.com/pyed/transmission-telegram)
`
)
var (
// flags
BotToken string
Masters masterSlice
RPCURL string
Username string
Password string
LogFile string
TransLogFile string // Transmission log file
NoLive bool
// transmission
Client *transmission.TransmissionClient
// telegram
Bot *tgbotapi.BotAPI
Updates <-chan tgbotapi.Update
// chatID will be used to keep track of which chat to send completion notifications.
chatID int64
// logging
logger = log.New(os.Stdout, "", log.LstdFlags)
// interval in seconds for live updates, affects: "active", "info", "speed", "head", "tail"
interval time.Duration = 5
// duration controls how many intervals will happen
duration = 10
// since telegram's markdown can't be escaped, we have to replace some chars
// affects only markdown users: info, active, head, tail
mdReplacer = strings.NewReplacer("*", "•",
"[", "(",
"]", ")",
"_", "-",
"`", "'")
)
// we need a type for masters for the flag package to parse them as a slice
type masterSlice []string
// String is a mandatory function for the flag package
func (masters *masterSlice) String() string {
return fmt.Sprintf("%s", *masters)
}
// Set is a mandatory function for the flag package
func (masters *masterSlice) Set(master string) error {
*masters = append(*masters, strings.ToLower(master))
return nil
}
// Contains takes a string and returns true if masterSlice has it
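// For example, with "-master=@Alice -master=bob" the slice ends up as
// ["alice", "bob"] (Set lowercases and init strips the '@'), so Contains("Bob")
// reports true.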
func (masters masterSlice) Contains(master string) bool {
master = strings.ToLower(master)
for i := range masters {
if masters[i] == master {
return true
}
}
return false
}
// init flags
func init() {
// define arguments and parse them.
flag.StringVar(&BotToken, "token", "", "Telegram bot token, Can be passed via environment variable 'TT_BOTT'")
flag.Var(&Masters, "master", "Your telegram handler, So the bot will only respond to you. Can specify more than one")
flag.StringVar(&RPCURL, "url", "http://localhost:9091/transmission/rpc", "Transmission RPC URL")
flag.StringVar(&Username, "username", "", "Transmission username")
flag.StringVar(&Password, "password", "", "Transmission password")
flag.StringVar(&LogFile, "logfile", "", "Send logs to a file")
flag.StringVar(&TransLogFile, "transmission-logfile", "", "Open transmission logfile to monitor torrents completion")
flag.BoolVar(&NoLive, "no-live", false, "Don't edit and update info after sending")
// set the usage message
flag.Usage = func() {
fmt.Fprint(os.Stderr, "Usage: transmission-telegram <-token=TOKEN> <-master=@tuser> [-master=@yuser2] [-url=http://] [-username=user] [-password=pass]\n\n")
flag.PrintDefaults()
}
flag.Parse()
// if we don't have BotToken passed, check the environment variable "TT_BOTT"
if BotToken == "" {
if token := os.Getenv("TT_BOTT"); len(token) > 1 {
BotToken = token
}
}
// make sure that we have the two mandatory arguments: telegram token & master's handler.
if BotToken == "" ||
len(Masters) < 1 {
fmt.Fprintf(os.Stderr, "Error: Mandatory argument missing! (-token or -master)\n\n")
flag.Usage()
os.Exit(1)
}
// make sure that the handler doesn't contain @
for i := range Masters {
Masters[i] = strings.Replace(Masters[i], "@", "", -1)
}
// if we got a log file, log to it
if LogFile != "" {
logf, err := os.OpenFile(LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatal(err)
}
logger.SetOutput(logf)
}
// if we got a transmission log file, monitor it for torrent completions so we can notify about them.
if TransLogFile != "" {
go func() {
ft := tailer.RunFileTailer(TransLogFile, false, nil)
// [2017-02-22 21:00:00.898] File-Name State changed from "Incomplete" to "Complete" (torrent.c:2218)
const (
substring = `"Incomplete" to "Complete"`
start = len(`[2017-02-22 21:00:00.898] `)
end = len(` State changed from "Incomplete" to "Complete" (torrent.c:2218)`)
)
for {
select {
case line := <-ft.Lines():
if strings.Contains(line, substring) {
// if we don't have a chatID continue
if chatID == 0 {
continue
}
msg := fmt.Sprintf("Completed: %s", line[start:len(line)-end])
send(msg, chatID, false)
}
case err := <-ft.Errors():
logger.Printf("[ERROR] tailing transmission log: %s", err)
return
}
}
}()
}
// if the `-username` flag isn't set, look into the environment variable 'TR_AUTH'
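// e.g. TR_AUTH="alice:secret" results in Username "alice" and Password "secret".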
if Username == "" {
if values := strings.Split(os.Getenv("TR_AUTH"), ":"); len(values) > 1 {
Username, Password = values[0], values[1]
}
}
// log the flags
logger.Printf("[INFO] Token=%s\n\t\tMasters=%s\n\t\tURL=%s\n\t\tUSER=%s\n\t\tPASS=%s",
BotToken, Masters, RPCURL, Username, Password)
}
// init transmission
func init() {
var err error
Client, err = transmission.New(RPCURL, Username, Password)
if err != nil {
fmt.Fprintf(os.Stderr, "[ERROR] Transmission: Make sure you have the right URL, Username and Password\n")
os.Exit(1)
}
}
// init telegram
func init() {
// authorize using the token
var err error
Bot, err = tgbotapi.NewBotAPI(BotToken)
if err != nil {
fmt.Fprintf(os.Stderr, "[ERROR] Telegram: %s\n", err)
os.Exit(1)
}
logger.Printf("[INFO] Authorized: %s", Bot.Self.UserName)
u := tgbotapi.NewUpdate(0)
u.Timeout = 60
Updates, err = Bot.GetUpdatesChan(u)
if err != nil {
fmt.Fprintf(os.Stderr, "[ERROR] Telegram: %s\n", err)
os.Exit(1)
}
}
func main() {
for update := range Updates {
// ignore edited messages
if update.Message == nil {
continue
}
// ignore non masters
if !Masters.Contains(update.Message.From.UserName) {
logger.Printf("[INFO] Ignored a message from: %s", update.Message.From.String())
continue
}
// update chatID for complete notification
if TransLogFile != "" && chatID != update.Message.Chat.ID {
chatID = update.Message.Chat.ID
}
// tokenize the update
tokens := strings.Split(update.Message.Text, " ")
// preprocess message based on URL schema
// in case those were added from the mobile via "Share..." option
// when it is not possible to easily prepend it with "add" command
if strings.HasPrefix(tokens[0], "magnet") || strings.HasPrefix(tokens[0], "http") {
tokens = append([]string{"add"}, tokens...)
}
command := strings.ToLower(tokens[0])
switch command {
case "list", "/list", "li", "/li", "/ls", "ls":
go list(update, tokens[1:])
case "head", "/head", "he", "/he":
go head(update, tokens[1:])
case "tail", "/tail", "ta", "/ta":
go tail(update, tokens[1:])
case "downs", "/downs", "dl", "/dl":
go downs(update)
case "seeding", "/seeding", "sd", "/sd":
go seeding(update)
case "paused", "/paused", "pa", "/pa":
go paused(update)
case "checking", "/checking", "ch", "/ch":
go checking(update)
case "active", "/active", "ac", "/ac":
go active(update)
case "errors", "/errors", "er", "/er":
go errors(update)
case "sort", "/sort", "so", "/so":
go sort(update, tokens[1:])
case "trackers", "/trackers", "tr", "/tr":
go trackers(update)
case "add", "/add", "ad", "/ad":
go add(update, tokens[1:])
case "search", "/search", "se", "/se":
go search(update, tokens[1:])
case "latest", "/latest", "la", "/la":
go latest(update, tokens[1:])
case "info", "/info", "in", "/in":
go info(update, tokens[1:])
case "stop", "/stop", "sp", "/sp":
go stop(update, tokens[1:])
case "start", "/start", "st", "/st":
go start(update, tokens[1:])
case "check", "/check", "ck", "/ck":
go check(update, tokens[1:])
case "stats", "/stats", "sa", "/sa":
go stats(update)
case "speed", "/speed", "ss", "/ss":
go speed(update)
case "count", "/count", "co", "/co":
go count(update)
case "del", "/del", "rm", "/rm":
go del(update, tokens[1:])
case "deldata", "/deldata":
go deldata(update, tokens[1:])
case "help", "/help":
go send(HELP, update.Message.Chat.ID, true)
case "version", "/version", "ver", "/ver":
go getVersion(update)
case "":
// might be a file received
go receiveTorrent(update)
default:
// no such command, try help
go send("No such command, try /help", update.Message.Chat.ID, false)
}
}
}
// list will form and send a list of all the torrents
// takes an optional argument which is a query to match against trackers
// to list only torrents that have a tracker that matches.
func list(ud tgbotapi.Update, tokens []string) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*list:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
// if it gets a query, it will list torrents that has trackers that match the query
if len(tokens) != 0 {
// (?i) for case insensitivity
regx, err := regexp.Compile("(?i)" + tokens[0])
if err != nil {
send("*list:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
for i := range torrents {
if regx.MatchString(torrents[i].GetTrackers()) {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
}
} else { // if we did not get a query, list all torrents
for i := range torrents {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
}
if buf.Len() == 0 {
// if we got a tracker query show different message
if len(tokens) != 0 {
send(fmt.Sprintf("*list:* No tracker matches: *%s*", tokens[0]), ud.Message.Chat.ID, true)
return
}
send("*list:* no torrents", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// head will list the first 5 or n torrents
func head(ud tgbotapi.Update, tokens []string) {
var (
n = 5 // default to 5
err error
)
if len(tokens) > 0 {
n, err = strconv.Atoi(tokens[0])
if err != nil {
send("*head:* argument must be a number", ud.Message.Chat.ID, false)
return
}
}
torrents, err := Client.GetTorrents()
if err != nil {
send("*head:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
// make sure that we stay in the boundaries
if n <= 0 || n > len(torrents) {
n = len(torrents)
}
buf := new(bytes.Buffer)
for i := range torrents[:n] {
torrentName := mdReplacer.Replace(torrents[i].Name) // escape markdown
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrents[i].ID, torrentName, torrents[i].TorrentStatus(), humanize.Bytes(torrents[i].Have()),
humanize.Bytes(torrents[i].SizeWhenDone), torrents[i].PercentDone*100, humanize.Bytes(torrents[i].RateDownload),
humanize.Bytes(torrents[i].RateUpload), torrents[i].Ratio()))
}
if buf.Len() == 0 {
send("*head:* no torrents", ud.Message.Chat.ID, false)
return
}
msgID := send(buf.String(), ud.Message.Chat.ID, true)
if NoLive {
return
}
// keep the info live
for i := 0; i < duration; i++ {
time.Sleep(time.Second * interval)
buf.Reset()
torrents, err = Client.GetTorrents()
if err != nil {
continue // try again if some error happened
}
if len(torrents) < 1 {
continue
}
// make sure that we stay in the boundaries
if n <= 0 || n > len(torrents) {
n = len(torrents)
}
for _, torrent := range torrents[:n] {
torrentName := mdReplacer.Replace(torrent.Name) // escape markdown
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()),
humanize.Bytes(torrent.SizeWhenDone), torrent.PercentDone*100, humanize.Bytes(torrent.RateDownload),
humanize.Bytes(torrent.RateUpload), torrent.Ratio()))
}
// no need to check if it is empty, as if the buffer is empty telegram won't change the message
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, buf.String())
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}
}
// tail lists the last 5 or n torrents
func tail(ud tgbotapi.Update, tokens []string) {
var (
n = 5 // default to 5
err error
)
if len(tokens) > 0 {
n, err = strconv.Atoi(tokens[0])
if err != nil {
send("*tail:* argument must be a number", ud.Message.Chat.ID, false)
return
}
}
torrents, err := Client.GetTorrents()
if err != nil {
send("*tail:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
// make sure that we stay in the boundaries
if n <= 0 || n > len(torrents) {
n = len(torrents)
}
buf := new(bytes.Buffer)
for _, torrent := range torrents[len(torrents)-n:] {
torrentName := mdReplacer.Replace(torrent.Name) // escape markdown
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()),
humanize.Bytes(torrent.SizeWhenDone), torrent.PercentDone*100, humanize.Bytes(torrent.RateDownload),
humanize.Bytes(torrent.RateUpload), torrent.Ratio()))
}
if buf.Len() == 0 {
send("*tail:* no torrents", ud.Message.Chat.ID, false)
return
}
msgID := send(buf.String(), ud.Message.Chat.ID, true)
if NoLive {
return
}
// keep the info live
for i := 0; i < duration; i++ {
time.Sleep(time.Second * interval)
buf.Reset()
torrents, err = Client.GetTorrents()
if err != nil {
continue // try again if some error happened
}
if len(torrents) < 1 {
continue
}
// make sure that we stay in the boundaries
if n <= 0 || n > len(torrents) {
n = len(torrents)
}
for _, torrent := range torrents[len(torrents)-n:] {
torrentName := mdReplacer.Replace(torrent.Name) // escape markdown
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()),
humanize.Bytes(torrent.SizeWhenDone), torrent.PercentDone*100, humanize.Bytes(torrent.RateDownload),
humanize.Bytes(torrent.RateUpload), torrent.Ratio()))
}
// no need to check if it is empty, as if the buffer is empty telegram won't change the message
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, buf.String())
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}
}
// downs will send the names of torrents with the status 'Downloading' or in the queue to download
func downs(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*downs:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
// Downloading or in queue to download
if torrents[i].Status == transmission.StatusDownloading ||
torrents[i].Status == transmission.StatusDownloadPending {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
}
if buf.Len() == 0 {
send("No downloads", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// seeding will send the names of the torrents with the status 'Seeding' or in the queue to seed
func seeding(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*seeding:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if torrents[i].Status == transmission.StatusSeeding ||
torrents[i].Status == transmission.StatusSeedPending {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
}
if buf.Len() == 0 {
send("No torrents seeding", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// paused will send the names of the torrents with status 'Paused'
func paused(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*paused:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if torrents[i].Status == transmission.StatusStopped {
buf.WriteString(fmt.Sprintf("<%d> %s\n%s (%.1f%%) DL: %s UL: %s R: %s\n\n",
torrents[i].ID, torrents[i].Name, torrents[i].TorrentStatus(),
torrents[i].PercentDone*100, humanize.Bytes(torrents[i].DownloadedEver),
humanize.Bytes(torrents[i].UploadedEver), torrents[i].Ratio()))
}
}
if buf.Len() == 0 {
send("No paused torrents", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// checking will send the names of torrents with the status 'Verifying' or in the queue to verify
func checking(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*checking:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if torrents[i].Status == transmission.StatusChecking ||
torrents[i].Status == transmission.StatusCheckPending {
buf.WriteString(fmt.Sprintf("<%d> %s\n%s (%.1f%%)\n\n",
torrents[i].ID, torrents[i].Name, torrents[i].TorrentStatus(),
torrents[i].PercentDone*100))
}
}
if buf.Len() == 0 {
send("No torrents verifying", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// active will send torrents that are actively downloading or uploading
func active(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*active:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if torrents[i].RateDownload > 0 ||
torrents[i].RateUpload > 0 {
// escape markdown
torrentName := mdReplacer.Replace(torrents[i].Name)
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrents[i].ID, torrentName, torrents[i].TorrentStatus(), humanize.Bytes(torrents[i].Have()),
humanize.Bytes(torrents[i].SizeWhenDone), torrents[i].PercentDone*100, humanize.Bytes(torrents[i].RateDownload),
humanize.Bytes(torrents[i].RateUpload), torrents[i].Ratio()))
}
}
if buf.Len() == 0 {
send("No active torrents", ud.Message.Chat.ID, false)
return
}
msgID := send(buf.String(), ud.Message.Chat.ID, true)
if NoLive {
return
}
// keep the active list live for 'duration * interval'
for i := 0; i < duration; i++ {
time.Sleep(time.Second * interval)
// reset the buffer to reuse it
buf.Reset()
// update torrents
torrents, err = Client.GetTorrents()
if err != nil {
continue // if there was error getting torrents, skip to the next iteration
}
// do the same loop again
for i := range torrents {
if torrents[i].RateDownload > 0 ||
torrents[i].RateUpload > 0 {
torrentName := mdReplacer.Replace(torrents[i].Name) // replace markdown chars
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\n\n",
torrents[i].ID, torrentName, torrents[i].TorrentStatus(), humanize.Bytes(torrents[i].Have()),
humanize.Bytes(torrents[i].SizeWhenDone), torrents[i].PercentDone*100, humanize.Bytes(torrents[i].RateDownload),
humanize.Bytes(torrents[i].RateUpload), torrents[i].Ratio()))
}
}
		// no need to check if it is empty; if the buffer is empty, telegram won't change the message
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, buf.String())
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}
// sleep one more time before putting the dashes
time.Sleep(time.Second * interval)
// replace the speed with dashes to indicate that we are done being live
buf.Reset()
for i := range torrents {
if torrents[i].RateDownload > 0 ||
torrents[i].RateUpload > 0 {
// escape markdown
torrentName := mdReplacer.Replace(torrents[i].Name)
buf.WriteString(fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *-* ↑ *-* R: *%s*\n\n",
torrents[i].ID, torrentName, torrents[i].TorrentStatus(), humanize.Bytes(torrents[i].Have()),
humanize.Bytes(torrents[i].SizeWhenDone), torrents[i].PercentDone*100, torrents[i].Ratio()))
}
}
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, buf.String())
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}
// errors will send torrents with errors
func errors(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*errors:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if torrents[i].Error != 0 {
buf.WriteString(fmt.Sprintf("<%d> %s\n%s\n",
torrents[i].ID, torrents[i].Name, torrents[i].ErrorString))
}
}
if buf.Len() == 0 {
send("No errors", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// sort changes how torrents are sorted
func sort(ud tgbotapi.Update, tokens []string) {
if len(tokens) == 0 {
send(`*sort* takes one of:
(*id, name, age, size, progress, downspeed, upspeed, download, upload, ratio*)
optionally start with (*rev*) for reversed order
e.g. "*sort rev size*" to get biggest torrents first.`, ud.Message.Chat.ID, true)
return
}
var reversed bool
if strings.ToLower(tokens[0]) == "rev" {
reversed = true
tokens = tokens[1:]
}
switch strings.ToLower(tokens[0]) {
case "id":
if reversed {
Client.SetSort(transmission.SortRevID)
break
}
Client.SetSort(transmission.SortID)
case "name":
if reversed {
Client.SetSort(transmission.SortRevName)
break
}
Client.SetSort(transmission.SortName)
case "age":
if reversed {
Client.SetSort(transmission.SortRevAge)
break
}
Client.SetSort(transmission.SortAge)
case "size":
if reversed {
Client.SetSort(transmission.SortRevSize)
break
}
Client.SetSort(transmission.SortSize)
case "progress":
if reversed {
Client.SetSort(transmission.SortRevProgress)
break
}
Client.SetSort(transmission.SortProgress)
case "downspeed":
if reversed {
Client.SetSort(transmission.SortRevDownSpeed)
break
}
Client.SetSort(transmission.SortDownSpeed)
case "upspeed":
if reversed {
Client.SetSort(transmission.SortRevUpSpeed)
break
}
Client.SetSort(transmission.SortUpSpeed)
case "download":
if reversed {
Client.SetSort(transmission.SortRevDownloaded)
break
}
Client.SetSort(transmission.SortDownloaded)
case "upload":
if reversed {
Client.SetSort(transmission.SortRevUploaded)
break
}
Client.SetSort(transmission.SortUploaded)
case "ratio":
if reversed {
Client.SetSort(transmission.SortRevRatio)
break
}
Client.SetSort(transmission.SortRatio)
default:
send("unkown sorting method", ud.Message.Chat.ID, false)
return
}
if reversed {
send("*sort:* reversed "+tokens[0], ud.Message.Chat.ID, false)
return
}
send("*sort:* "+tokens[0], ud.Message.Chat.ID, false)
}
var trackerRegex = regexp.MustCompile(`(?:https?|udp)://([^:/]*)`)
// trackers will send a list of trackers and how many torrents each one has
func trackers(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*trackers:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
trackers := make(map[string]int)
for i := range torrents {
for _, tracker := range torrents[i].Trackers {
sm := trackerRegex.FindSubmatch([]byte(tracker.Announce))
if len(sm) > 1 {
currentTracker := string(sm[1])
n, ok := trackers[currentTracker]
if !ok {
trackers[currentTracker] = 1
continue
}
trackers[currentTracker] = n + 1
}
}
}
buf := new(bytes.Buffer)
for k, v := range trackers {
buf.WriteString(fmt.Sprintf("%d - %s\n", v, k))
}
if buf.Len() == 0 {
send("No trackers!", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// add takes a URL to a .torrent file and adds it to transmission
func add(ud tgbotapi.Update, tokens []string) {
if len(tokens) == 0 {
send("*add:* needs at least one URL", ud.Message.Chat.ID, false)
return
}
// loop over the URL/s and add them
for _, url := range tokens {
cmd := transmission.NewAddCmdByURL(url)
torrent, err := Client.ExecuteAddCommand(cmd)
if err != nil {
send("*add:* "+err.Error(), ud.Message.Chat.ID, false)
continue
}
		// if torrent.Name is empty, an error happened while adding
if torrent.Name == "" {
send("*add:* error adding "+url, ud.Message.Chat.ID, false)
continue
}
send(fmt.Sprintf("*Added:* <%d> %s", torrent.ID, torrent.Name), ud.Message.Chat.ID, false)
}
}
// receiveTorrent gets an update that potentially has a .torrent file to add
func receiveTorrent(ud tgbotapi.Update) {
if ud.Message.Document == nil {
return // has no document
}
// get the file ID and make the config
fconfig := tgbotapi.FileConfig{
FileID: ud.Message.Document.FileID,
}
file, err := Bot.GetFile(fconfig)
if err != nil {
send("*receiver:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
// add by file URL
add(ud, []string{file.Link(BotToken)})
}
// search takes a query and returns torrents whose names match it
func search(ud tgbotapi.Update, tokens []string) {
// make sure that we got a query
if len(tokens) == 0 {
send("*search:* needs an argument", ud.Message.Chat.ID, false)
return
}
query := strings.Join(tokens, " ")
// "(?i)" for case insensitivity
regx, err := regexp.Compile("(?i)" + query)
if err != nil {
send("*search:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
torrents, err := Client.GetTorrents()
if err != nil {
send("*search:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
buf := new(bytes.Buffer)
for i := range torrents {
if regx.MatchString(torrents[i].Name) {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
}
if buf.Len() == 0 {
send("No matches!", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// latest takes n and returns the latest n torrents
func latest(ud tgbotapi.Update, tokens []string) {
var (
n = 5 // default to 5
err error
)
if len(tokens) > 0 {
n, err = strconv.Atoi(tokens[0])
if err != nil {
send("*latest:* argument must be a number", ud.Message.Chat.ID, false)
return
}
}
torrents, err := Client.GetTorrents()
if err != nil {
send("*latest:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
// make sure that we stay in the boundaries
if n <= 0 || n > len(torrents) {
n = len(torrents)
}
// sort by age, and set reverse to true to get the latest first
torrents.SortAge(true)
buf := new(bytes.Buffer)
for i := range torrents[:n] {
buf.WriteString(fmt.Sprintf("<%d> %s\n", torrents[i].ID, torrents[i].Name))
}
if buf.Len() == 0 {
send("*latest:* No torrents", ud.Message.Chat.ID, false)
return
}
send(buf.String(), ud.Message.Chat.ID, false)
}
// info takes an id of a torrent and returns some info about it
func info(ud tgbotapi.Update, tokens []string) {
if len(tokens) == 0 {
send("*info:* needs a torrent ID number", ud.Message.Chat.ID, false)
return
}
for _, id := range tokens {
torrentID, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*info:* %s is not a number", id), ud.Message.Chat.ID, false)
continue
}
// get the torrent
torrent, err := Client.GetTorrent(torrentID)
if err != nil {
send(fmt.Sprintf("*info:* Can't find a torrent with an ID of %d", torrentID), ud.Message.Chat.ID, false)
continue
}
// get the trackers using 'trackerRegex'
var trackers string
for _, tracker := range torrent.Trackers {
sm := trackerRegex.FindSubmatch([]byte(tracker.Announce))
if len(sm) > 1 {
trackers += string(sm[1]) + " "
}
}
// format the info
torrentName := mdReplacer.Replace(torrent.Name) // escape markdown
info := fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\nDL: *%s* UP: *%s*\nAdded: *%s*, ETA: *%s*\nTrackers: `%s`",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()), humanize.Bytes(torrent.SizeWhenDone),
torrent.PercentDone*100, humanize.Bytes(torrent.RateDownload), humanize.Bytes(torrent.RateUpload), torrent.Ratio(),
humanize.Bytes(torrent.DownloadedEver), humanize.Bytes(torrent.UploadedEver), time.Unix(torrent.AddedDate, 0).Format(time.Stamp),
torrent.ETA(), trackers)
// send it
msgID := send(info, ud.Message.Chat.ID, true)
if NoLive {
return
}
// this go-routine will make the info live for 'duration * interval'
go func(torrentID, msgID int) {
for i := 0; i < duration; i++ {
time.Sleep(time.Second * interval)
torrent, err = Client.GetTorrent(torrentID)
if err != nil {
continue // skip this iteration if there's an error retrieving the torrent's info
}
torrentName := mdReplacer.Replace(torrent.Name)
info := fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *%s* ↑ *%s* R: *%s*\nDL: *%s* UP: *%s*\nAdded: *%s*, ETA: *%s*\nTrackers: `%s`",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()), humanize.Bytes(torrent.SizeWhenDone),
torrent.PercentDone*100, humanize.Bytes(torrent.RateDownload), humanize.Bytes(torrent.RateUpload), torrent.Ratio(),
humanize.Bytes(torrent.DownloadedEver), humanize.Bytes(torrent.UploadedEver), time.Unix(torrent.AddedDate, 0).Format(time.Stamp),
torrent.ETA(), trackers)
// update the message
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, info)
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}
// sleep one more time before the dashes
time.Sleep(time.Second * interval)
// at the end write dashes to indicate that we are done being live.
torrentName := mdReplacer.Replace(torrent.Name)
info := fmt.Sprintf("`<%d>` *%s*\n%s *%s* of *%s* (*%.1f%%*) ↓ *- B* ↑ *- B* R: *%s*\nDL: *%s* UP: *%s*\nAdded: *%s*, ETA: *-*\nTrackers: `%s`",
torrent.ID, torrentName, torrent.TorrentStatus(), humanize.Bytes(torrent.Have()), humanize.Bytes(torrent.SizeWhenDone),
torrent.PercentDone*100, torrent.Ratio(), humanize.Bytes(torrent.DownloadedEver), humanize.Bytes(torrent.UploadedEver),
time.Unix(torrent.AddedDate, 0).Format(time.Stamp), trackers)
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, info)
editConf.ParseMode = tgbotapi.ModeMarkdown
Bot.Send(editConf)
}(torrentID, msgID)
}
}
// stop takes id[s] of torrent[s] or 'all' to stop them
func stop(ud tgbotapi.Update, tokens []string) {
// make sure that we got at least one argument
if len(tokens) == 0 {
send("*stop:* needs an argument", ud.Message.Chat.ID, false)
return
}
// if the first argument is 'all' then stop all torrents
if tokens[0] == "all" {
if err := Client.StopAll(); err != nil {
send("*stop:* error occurred while stopping some torrents", ud.Message.Chat.ID, false)
return
}
send("Stopped all torrents", ud.Message.Chat.ID, false)
return
}
for _, id := range tokens {
num, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*stop:* %s is not a number", id), ud.Message.Chat.ID, false)
continue
}
status, err := Client.StopTorrent(num)
if err != nil {
send("*stop:* "+err.Error(), ud.Message.Chat.ID, false)
continue
}
torrent, err := Client.GetTorrent(num)
if err != nil {
send(fmt.Sprintf("[fail] *stop:* No torrent with an ID of %d", num), ud.Message.Chat.ID, false)
return
}
send(fmt.Sprintf("[%s] *stop:* %s", status, torrent.Name), ud.Message.Chat.ID, false)
}
}
// start takes id[s] of torrent[s] or 'all' to start them
func start(ud tgbotapi.Update, tokens []string) {
// make sure that we got at least one argument
if len(tokens) == 0 {
send("*start:* needs an argument", ud.Message.Chat.ID, false)
return
}
// if the first argument is 'all' then start all torrents
if tokens[0] == "all" {
if err := Client.StartAll(); err != nil {
send("*start:* error occurred while starting some torrents", ud.Message.Chat.ID, false)
return
}
send("Started all torrents", ud.Message.Chat.ID, false)
return
}
for _, id := range tokens {
num, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*start:* %s is not a number", id), ud.Message.Chat.ID, false)
continue
}
status, err := Client.StartTorrent(num)
if err != nil {
send("*start:* "+err.Error(), ud.Message.Chat.ID, false)
continue
}
torrent, err := Client.GetTorrent(num)
if err != nil {
send(fmt.Sprintf("[fail] *start:* No torrent with an ID of %d", num), ud.Message.Chat.ID, false)
return
}
send(fmt.Sprintf("[%s] *start:* %s", status, torrent.Name), ud.Message.Chat.ID, false)
}
}
// check takes id[s] of torrent[s] or 'all' to verify them
func check(ud tgbotapi.Update, tokens []string) {
// make sure that we got at least one argument
if len(tokens) == 0 {
send("*check:* needs an argument", ud.Message.Chat.ID, false)
return
}
	// if the first argument is 'all' then verify all torrents
if tokens[0] == "all" {
if err := Client.VerifyAll(); err != nil {
send("*check:* error occurred while verifying some torrents", ud.Message.Chat.ID, false)
return
}
send("Verifying all torrents", ud.Message.Chat.ID, false)
return
}
for _, id := range tokens {
num, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*check:* %s is not a number", id), ud.Message.Chat.ID, false)
continue
}
status, err := Client.VerifyTorrent(num)
if err != nil {
send("*check:* "+err.Error(), ud.Message.Chat.ID, false)
continue
}
torrent, err := Client.GetTorrent(num)
if err != nil {
send(fmt.Sprintf("[fail] *check:* No torrent with an ID of %d", num), ud.Message.Chat.ID, false)
return
}
send(fmt.Sprintf("[%s] *check:* %s", status, torrent.Name), ud.Message.Chat.ID, false)
}
}
// stats echoes back transmission stats
func stats(ud tgbotapi.Update) {
stats, err := Client.GetStats()
if err != nil {
send("*stats:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
msg := fmt.Sprintf(
`
Total: *%d*
Active: *%d*
Paused: *%d*
_Current Stats_
Downloaded: *%s*
Uploaded: *%s*
Running time: *%s*
_Accumulative Stats_
Sessions: *%d*
Downloaded: *%s*
Uploaded: *%s*
Total Running time: *%s*
`,
stats.TorrentCount,
stats.ActiveTorrentCount,
stats.PausedTorrentCount,
humanize.Bytes(stats.CurrentStats.DownloadedBytes),
humanize.Bytes(stats.CurrentStats.UploadedBytes),
stats.CurrentActiveTime(),
stats.CumulativeStats.SessionCount,
humanize.Bytes(stats.CumulativeStats.DownloadedBytes),
humanize.Bytes(stats.CumulativeStats.UploadedBytes),
stats.CumulativeActiveTime(),
)
send(msg, ud.Message.Chat.ID, true)
}
// speed will echo back the current download and upload speeds
func speed(ud tgbotapi.Update) {
stats, err := Client.GetStats()
if err != nil {
send("*speed:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
msg := fmt.Sprintf("↓ %s ↑ %s", humanize.Bytes(stats.DownloadSpeed), humanize.Bytes(stats.UploadSpeed))
msgID := send(msg, ud.Message.Chat.ID, false)
if NoLive {
return
}
for i := 0; i < duration; i++ {
time.Sleep(time.Second * interval)
stats, err = Client.GetStats()
if err != nil {
continue
}
msg = fmt.Sprintf("↓ %s ↑ %s", humanize.Bytes(stats.DownloadSpeed), humanize.Bytes(stats.UploadSpeed))
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, msg)
Bot.Send(editConf)
time.Sleep(time.Second * interval)
}
// sleep one more time before switching to dashes
time.Sleep(time.Second * interval)
// show dashes to indicate that we are done updating.
editConf := tgbotapi.NewEditMessageText(ud.Message.Chat.ID, msgID, "↓ - B ↑ - B")
Bot.Send(editConf)
}
// count returns current torrents count per status
func count(ud tgbotapi.Update) {
torrents, err := Client.GetTorrents()
if err != nil {
send("*count:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
var downloading, seeding, stopped, checking, downloadingQ, seedingQ, checkingQ int
for i := range torrents {
switch torrents[i].Status {
case transmission.StatusDownloading:
downloading++
case transmission.StatusSeeding:
seeding++
case transmission.StatusStopped:
stopped++
case transmission.StatusChecking:
checking++
case transmission.StatusDownloadPending:
downloadingQ++
case transmission.StatusSeedPending:
seedingQ++
case transmission.StatusCheckPending:
checkingQ++
}
}
msg := fmt.Sprintf("Downloading: %d\nSeeding: %d\nPaused: %d\nVerifying: %d\n\n- Waiting to -\nDownload: %d\nSeed: %d\nVerify: %d\n\nTotal: %d",
downloading, seeding, stopped, checking, downloadingQ, seedingQ, checkingQ, len(torrents))
send(msg, ud.Message.Chat.ID, false)
}
// del takes one or more ids and deletes the corresponding torrent/s
func del(ud tgbotapi.Update, tokens []string) {
// make sure that we got an argument
if len(tokens) == 0 {
send("*del:* needs an ID", ud.Message.Chat.ID, false)
return
}
// loop over tokens to read each potential id
for _, id := range tokens {
num, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*del:* %s is not an ID", id), ud.Message.Chat.ID, false)
return
}
name, err := Client.DeleteTorrent(num, false)
if err != nil {
send("*del:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
send("*Deleted:* "+name, ud.Message.Chat.ID, false)
}
}
// deldata takes one or more ids and deletes the corresponding torrent/s along with their data
func deldata(ud tgbotapi.Update, tokens []string) {
// make sure that we got an argument
if len(tokens) == 0 {
send("*deldata:* needs an ID", ud.Message.Chat.ID, false)
return
}
// loop over tokens to read each potential id
for _, id := range tokens {
num, err := strconv.Atoi(id)
if err != nil {
send(fmt.Sprintf("*deldata:* %s is not an ID", id), ud.Message.Chat.ID, false)
return
}
name, err := Client.DeleteTorrent(num, true)
if err != nil {
send("*deldata:* "+err.Error(), ud.Message.Chat.ID, false)
return
}
send("Deleted with data: "+name, ud.Message.Chat.ID, false)
}
}
// getVersion sends transmission version + transmission-telegram version
func getVersion(ud tgbotapi.Update) {
send(fmt.Sprintf("Transmission *%s*\nTransmission-telegram *%s*", Client.Version(), VERSION), ud.Message.Chat.ID, true)
}
// send takes a chat id and a message to send, and returns the message id of the sent message
func send(text string, chatID int64, markdown bool) int {
// set typing action
action := tgbotapi.NewChatAction(chatID, tgbotapi.ChatTyping)
Bot.Send(action)
	// check the rune count; telegram is limited to 4096 chars per message,
	// so if our message is > 4096, split it into chunks and send them.
msgRuneCount := utf8.RuneCountInString(text)
LenCheck:
stop := 4095
if msgRuneCount > 4096 {
for text[stop] != 10 { // '\n'
stop--
}
msg := tgbotapi.NewMessage(chatID, text[:stop])
msg.DisableWebPagePreview = true
if markdown {
msg.ParseMode = tgbotapi.ModeMarkdown
}
// send current chunk
if _, err := Bot.Send(msg); err != nil {
logger.Printf("[ERROR] Send: %s", err)
}
// move to the next chunk
text = text[stop:]
msgRuneCount = utf8.RuneCountInString(text)
goto LenCheck
}
// if msgRuneCount < 4096, send it normally
msg := tgbotapi.NewMessage(chatID, text)
msg.DisableWebPagePreview = true
if markdown {
msg.ParseMode = tgbotapi.ModeMarkdown
}
resp, err := Bot.Send(msg)
if err != nil {
logger.Printf("[ERROR] Send: %s", err)
}
return resp.MessageID
}
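
// Illustrative sketch (not part of the original bot): this mirrors the chunking
// logic used by send() above, splitting long text on a newline boundary so each
// piece stays under Telegram's 4096-character limit. The function name, signature
// and the hard-split fallback are hypothetical.
func splitForTelegram(text string, limit int) (chunks []string) {
	for utf8.RuneCountInString(text) > limit {
		stop := limit - 1
		// walk backwards to the nearest newline, like send() does
		for stop > 0 && text[stop] != '\n' {
			stop--
		}
		if stop == 0 {
			stop = limit - 1 // no newline found; hard-split (assumption)
		}
		chunks = append(chunks, text[:stop])
		text = text[stop:]
	}
	return append(chunks, text)
}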
| [
"\"TT_BOTT\"",
"\"TR_AUTH\""
] | [] | [
"TR_AUTH",
"TT_BOTT"
] | [] | ["TR_AUTH", "TT_BOTT"] | go | 2 | 0 | |
airflow/models/variable.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import os
from typing import Any, Optional
from cryptography.fernet import InvalidToken as InvalidFernetToken
from sqlalchemy import Boolean, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import Session, synonym
from airflow.configuration import ensure_secrets_loaded
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
log = logging.getLogger()
class Variable(Base, LoggingMixin):
"""
Variables are a generic way to store and retrieve arbitrary content or settings
as a simple key value store within Airflow.
"""
__tablename__ = "variable"
__NO_DEFAULT_SENTINEL = object()
id = Column(Integer, primary_key=True)
key = Column(String(ID_LEN), unique=True)
_val = Column('val', Text)
description = Column(Text)
is_encrypted = Column(Boolean, unique=False, default=False)
def __init__(self, key=None, val=None, description=None):
super().__init__()
self.key = key
self.val = val
self.description = description
def __repr__(self):
        # Show the stored (possibly encrypted) value rather than the decrypted one
return f'{self.key} : {self._val}'
def get_val(self):
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key"""
if self._val is not None and self.is_encrypted:
try:
fernet = get_fernet()
return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
except InvalidFernetToken:
self.log.error("Can't decrypt _val for key=%s, invalid token or value", self.key)
return None
except Exception: # pylint: disable=broad-except
self.log.error("Can't decrypt _val for key=%s, FERNET_KEY configuration missing", self.key)
return None
else:
return self._val
def set_val(self, value):
"""Encode the specified value with Fernet Key and store it in Variables Table."""
if value is not None:
fernet = get_fernet()
self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def val(cls): # pylint: disable=no-self-argument
"""Get Airflow Variable from Metadata DB and decode it using the Fernet Key"""
return synonym('_val', descriptor=property(cls.get_val, cls.set_val))
@classmethod
def setdefault(cls, key, default, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj
@classmethod
def get(
cls,
key: str,
default_var: Any = __NO_DEFAULT_SENTINEL,
deserialize_json: bool = False,
) -> Any:
"""
Gets a value for an Airflow Variable Key
:param key: Variable Key
        :param default_var: Default value of the Variable if the Variable doesn't exist
:param deserialize_json: Deserialize the value to a Python dict
"""
var_val = Variable.get_variable_from_secrets(key=key)
if var_val is None:
if default_var is not cls.__NO_DEFAULT_SENTINEL:
return default_var
else:
raise KeyError(f'Variable {key} does not exist')
else:
if deserialize_json:
return json.loads(var_val)
else:
return var_val
@classmethod
@provide_session
def set(cls, key: str, value: Any, serialize_json: bool = False, session: Session = None):
"""
Sets a value for an Airflow Variable with a given Key
:param key: Variable Key
:param value: Value to set for the Variable
:param serialize_json: Serialize the value to a JSON string
:param session: SQL Alchemy Sessions
"""
env_var_name = "AIRFLOW_VAR_" + key.upper()
if env_var_name in os.environ:
log.warning(
"You have the environment variable %s defined, which takes precedence over reading "
"from the database. The value will be saved, but to read it you have to delete "
"the environment variable.",
env_var_name,
)
if serialize_json:
stored_value = json.dumps(value, indent=2)
else:
stored_value = str(value)
Variable.delete(key, session=session)
session.add(Variable(key=key, val=stored_value))
session.flush()
@classmethod
@provide_session
def delete(cls, key: str, session: Session = None) -> int:
"""
Delete an Airflow Variable for a given key
:param key: Variable Key
:param session: SQL Alchemy Sessions
"""
return session.query(cls).filter(cls.key == key).delete()
def rotate_fernet_key(self):
"""Rotate Fernet Key"""
fernet = get_fernet()
if self._val and self.is_encrypted:
self._val = fernet.rotate(self._val.encode('utf-8')).decode()
@staticmethod
def get_variable_from_secrets(key: str) -> Optional[str]:
"""
Get Airflow Variable by iterating over all Secret Backends.
:param key: Variable Key
:return: Variable Value
"""
for secrets_backend in ensure_secrets_loaded():
var_val = secrets_backend.get_variable(key=key)
if var_val is not None:
return var_val
return None
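

# Illustrative usage sketch (not part of the Airflow source); key names are made up:
#
#   Variable.set("report_config", {"retries": 3}, serialize_json=True)
#   cfg = Variable.get("report_config", deserialize_json=True)      # -> {"retries": 3}
#   fallback = Variable.setdefault("missing_key", "default-value")  # stores and returns the default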
| [] | [] | [] | [] | [] | python | 0 | 0 | |
scripts/run_pyright.py | # -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import os
import sys
import subprocess
def main():
subprocess.call(
["npx", "pyright"],
env={
**os.environ,
"PATH": os.path.pathsep.join(
(
os.path.dirname(sys.executable),
os.getenv("PATH") or "",
)
),
},
shell=True,
)
if __name__ == "__main__":
main()
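
# Illustrative invocation sketch (assumptions: pyright is available via npm/npx and the
# active interpreter's bin directory should win on PATH):
#
#   $ python scripts/run_pyright.py
#   # roughly equivalent to: PATH="<python-bin-dir>:$PATH" npx pyright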
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
client/informers/externalversions/azurerm/v1alpha1/mssqldatabasevulnerabilityassessmentrulebaseline.go | /*
Copyright The Kubeform Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
azurermv1alpha1 "kubeform.dev/kubeform/apis/azurerm/v1alpha1"
versioned "kubeform.dev/kubeform/client/clientset/versioned"
internalinterfaces "kubeform.dev/kubeform/client/informers/externalversions/internalinterfaces"
v1alpha1 "kubeform.dev/kubeform/client/listers/azurerm/v1alpha1"
)
// MssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer provides access to a shared informer and lister for
// MssqlDatabaseVulnerabilityAssessmentRuleBaselines.
type MssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.MssqlDatabaseVulnerabilityAssessmentRuleBaselineLister
}
type mssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer constructs a new informer for MssqlDatabaseVulnerabilityAssessmentRuleBaseline type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer constructs a new informer for MssqlDatabaseVulnerabilityAssessmentRuleBaseline type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AzurermV1alpha1().MssqlDatabaseVulnerabilityAssessmentRuleBaselines(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AzurermV1alpha1().MssqlDatabaseVulnerabilityAssessmentRuleBaselines(namespace).Watch(context.TODO(), options)
},
},
&azurermv1alpha1.MssqlDatabaseVulnerabilityAssessmentRuleBaseline{},
resyncPeriod,
indexers,
)
}
func (f *mssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredMssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *mssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&azurermv1alpha1.MssqlDatabaseVulnerabilityAssessmentRuleBaseline{}, f.defaultInformer)
}
func (f *mssqlDatabaseVulnerabilityAssessmentRuleBaselineInformer) Lister() v1alpha1.MssqlDatabaseVulnerabilityAssessmentRuleBaselineLister {
return v1alpha1.NewMssqlDatabaseVulnerabilityAssessmentRuleBaselineLister(f.Informer().GetIndexer())
}
| [] | [] | [] | [] | [] | go | null | null | null |
i2catlib/lib.go | package i2catlib
import (
"io"
"image"
"bytes"
"image/png"
_ "image/jpeg"
_ "image/gif"
"os"
"fmt"
"encoding/base64"
)
func ReadIn(in io.Reader) ([]byte, error) {
img, _, err := image.Decode(in)
if err != nil {
return nil, err
}
buf := new(bytes.Buffer)
if err = png.Encode(buf, img); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func isScreen() bool {
return os.Getenv("TERM") == "screen"
}
func encode(src []byte, dest io.Writer) error {
enc := base64.NewEncoder(base64.StdEncoding, dest)
_, err := enc.Write(src)
return err
}
func stdout(output string) error {
_, err := fmt.Fprint(os.Stdout, output)
return err
}
func PrintImg(img io.Reader) error {
buf, err := ReadIn(img)
if err != nil {
return err
}
isScr := isScreen()
if isScr {
stdout( "\033Ptmux;\033")
}
stdout("\033]1337;File=;inline=1:")
if err = encode(buf, os.Stdout); err != nil {
return err
}
stdout("\a")
if isScr {
stdout("\033\\")
}
stdout("\n")
return nil
}
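
// Illustrative usage sketch (hypothetical caller, not part of this package):
//
//	f, err := os.Open("picture.png")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := i2catlib.PrintImg(f); err != nil { // emits the iTerm2 inline-image escape sequence
//		log.Fatal(err)
//	}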
| [
"\"TERM\""
] | [] | [
"TERM"
] | [] | ["TERM"] | go | 1 | 0 | |
lib/misc.go | package pitchfork
import (
"bytes"
"crypto/sha256"
"errors"
"fmt"
"github.com/disintegration/imaging"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
var LogLocation = false
func where_strippath(path string, workdir string, gopath string) (strippedpath string) {
wl := len(workdir)
fl := len(path)
/* If the prefix matches, remove it, to lessen output */
if wl > 1 && fl > (wl+1) && workdir == path[0:wl] {
path = path[wl+1:]
} else {
/* Shrink too when in GOPATH */
gp := filepath.Join(gopath, "src")
gl := len(gp)
if fl > (gl+1) && gp == path[0:gl] {
path = path[gl+1:]
}
}
return path
}
func Where(off int) string {
file := "<autogenerated>"
line := 0
/* Unwrap interfaces, they show up as "<autogenerated>" */
for n := 1; file == "<autogenerated>"; n++ {
_, file, line, _ = runtime.Caller(n + off)
}
/* Get work directory (where we where started from) */
wd, _ := os.Getwd()
gopath := os.Getenv("GOPATH")
/* Strip the path where possible */
file = where_strippath(file, wd, gopath)
return file + ":" + strconv.Itoa(line)
}
func LogLoc(off int, pfx string) string {
if !LogLocation {
if pfx != "" {
return pfx + ": "
}
return ""
}
s := ""
if pfx != "" {
s += pfx + "("
}
s += Where(2 + off)
if pfx != "" {
s += ") "
} else {
s += " "
}
return s
}
/*
* Encode non-ASCII chars in URL-encoded format
 * This is to make sure control-codes etc. do not end up in the output
*
* We encode in URL-encoded format, but we do not escape HTML chars
* anything rendering those should handle that part properly.
*
* Note: this might not be unicode friendly
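 *
 * Illustrative example (assumed behaviour): OutEsc("a\nb%") would yield "a%0Ab%25".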
*/
func OutEsc(str string) (nstr string) {
nstr = ""
for i := 0; i < len(str); i++ {
c := str[i]
/* Anything outside of space ' ' to tilde '~' + '%' itself */
if c < 32 || c > 126 || c == '%' {
nstr += "%"
nstr += string("0123456789ABCDEF"[c>>4])
nstr += string("0123456789ABCDEF"[c&15])
} else {
nstr += string(c)
}
}
return
}
/* Silly golang log functions just ignore errors... thus do our own */
func OutA(format string, a ...interface{}) {
str := fmt.Sprintf(format, a...)
/* Escape odd chars */
str = OutEsc(str)
err := log.Output(2, str)
if err != nil {
/* Not really anything else we can do actually */
fmt.Printf("Logging error '%s' for message: %s", err.Error(), str)
}
}
/*
* Logging - centralized and to avoid importing log everywhere
* Might extend this with extra params to make per-context tracing possible
*/
func ErrA(off int, format string, a ...interface{}) {
OutA(LogLoc(1+off, "Error")+format, a...)
}
func Err(message string) {
ErrA(1, message)
}
func Errf(format string, a ...interface{}) {
ErrA(1, format, a...)
}
func LogA(off int, format string, a ...interface{}) {
OutA(LogLoc(1+off, "")+format, a...)
}
func Log(message string) {
LogA(1, message)
}
func Logf(format string, a ...interface{}) {
LogA(1, format, a...)
}
func DbgA(off int, format string, a ...interface{}) {
if !Debug {
return
}
where := Where(1 + off)
pc := make([]uintptr, 10+off)
runtime.Callers(2+off, pc)
f := runtime.FuncForPC(pc[0])
OutA("Debug("+where+") "+f.Name()+" "+format, a...)
}
func Dbg(message string) {
DbgA(1, message)
}
func Dbgf(format string, a ...interface{}) {
DbgA(1, format, a...)
}
/* IsTrue() defaults to false if the string is not known */
func IsTrue(val string) (b bool) {
if val == "yes" || val == "true" || val == "on" {
b = true
} else {
b = false
}
return b
}
func YesNo(b bool) (val string) {
	if b {
return "yes"
}
return "no"
}
func NormalizeBoolean(val string) string {
return YesNo(IsTrue(val))
}
/* Parse the string (obeying quoting) */
func SplitArgs(str string) (args []string) {
r := regexp.MustCompile("'.+'|\".+\"|\\S+")
return r.FindAllString(str, -1)
}
func Daemon(nochdir int, noclose int) (err error) {
var ret uintptr
var ret2 uintptr
var serr syscall.Errno
/* Already a daemon? */
if syscall.Getppid() == 1 {
return nil
}
/* Fork off the parent process */
ret, ret2, serr = syscall.RawSyscall(syscall.SYS_FORK, 0, 0, 0)
if serr != 0 {
return errors.New("fork() failed")
}
/* Fork failure */
if ret2 < 0 {
os.Exit(-1)
}
	/* If we got a good PID, then we exit the parent process */
if ret > 0 {
os.Exit(0)
}
/* Change the file mode mask */
_ = syscall.Umask(0)
/* Create a new SID for the child process */
_, err = syscall.Setsid()
if err != nil {
return errors.New("Error: syscall.Setsid errno: " + err.Error())
}
if nochdir == 0 {
os.Chdir("/")
}
if noclose == 0 {
f, e := os.OpenFile(os.DevNull, os.O_RDWR, 0)
if e == nil {
fd := int(f.Fd())
syscall.Dup2(int(fd), int(os.Stdin.Fd()))
syscall.Dup2(int(fd), int(os.Stdout.Fd()))
syscall.Dup2(int(fd), int(os.Stderr.Fd()))
}
}
return nil
}
func SetUID(username string) (err error) {
var u *user.User
var uid int
var gid int
u, err = user.Lookup(username)
if err != nil {
return err
}
	uid, err = strconv.Atoi(u.Uid)
	if err != nil {
		return err
	}

	gid, err = strconv.Atoi(u.Gid)
	if err != nil {
		return err
	}
if gid != 0 {
if err := syscall.Setgid(gid); err != nil {
log.Fatalf("failed to Setgid(%d): %v", gid, err)
}
}
if uid != 0 {
if err := syscall.Setuid(uid); err != nil {
log.Fatalf("failed to Setuid(%d): %v", uid, err)
}
}
return nil
}
func GetPID() (pid string) {
return strconv.Itoa(syscall.Getpid())
}
func StorePID(filename string, pid string) {
err := ioutil.WriteFile(filename, []byte(pid), 0644)
if err != nil {
log.Println("Failed to store PID in " + filename + ": " + err.Error())
}
}
func SortKeys(tbl map[string]string) (keys []string) {
for k := range tbl {
keys = append(keys, k)
}
sort.Strings(keys)
return
}
func Hex(data []byte) string {
return fmt.Sprintf("%x", data)
}
func HashIt(input string) (hash string) {
h := sha256.New()
h.Write([]byte(input))
return Hex(h.Sum(nil))
}
func Fullname_to_ident(name string) (out string, err error) {
/* Force lower case */
out = strings.ToLower(name)
/* Only pay attention to the letters in the name */
validID := regexp.MustCompile(`[^a-z]+`)
out = validID.ReplaceAllLiteralString(out, "")
/* Append a random number */
out += strconv.Itoa(Rand_number(8999) + 1000)
/* Verify it */
out, err = Chk_ident("UserName", out)
return
}
func Chk_ident(name string, in string) (out string, err error) {
/* Require something at least */
if in == "" {
err = errors.New("No " + name + " provided (empty)")
return
}
/* Force lower case */
out = strings.ToLower(in)
ok, err := regexp.MatchString(Config.Username_regexp, out)
if !ok {
err = errors.New("Invalid characters in " + name + ", only " + Config.Username_regexp + " are allowed")
return
}
/* All okay and filtered */
return
}
func Image_resize(file io.Reader, maxsize string) (bits []byte, err error) {
var im image.Image
s := strings.SplitN(maxsize, "x", 2)
max_w, _ := strconv.Atoi(s[0])
max_h, _ := strconv.Atoi(s[1])
im, _, err = image.Decode(file)
if err != nil {
return
}
/* Fit it in the box */
im = imaging.Fit(im, max_w, max_h, imaging.Lanczos)
/* Re-encode it as a PNG */
buf := &bytes.Buffer{}
err = imaging.Encode(buf, im, imaging.PNG)
if err != nil {
return
}
bits = buf.Bytes()
return
}
func Rand_number(max int) int {
s1 := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(s1)
return r1.Intn(max)
}
func ToUTF8(iso8859_1_buf []byte) string {
buf := make([]rune, len(iso8859_1_buf))
for i, b := range iso8859_1_buf {
buf[i] = rune(b)
}
return string(buf)
}
func CopyFile(ctx PfCtx, verbose bool, source string, dest string) (err error) {
srcf, err := os.Open(source)
if err != nil {
return err
}
defer srcf.Close()
dstf, err := os.Create(dest)
if err != nil {
return err
}
defer dstf.Close()
_, err = io.Copy(dstf, srcf)
if err != nil {
return
}
src, err := os.Stat(source)
	if err == nil {
		err = os.Chmod(dest, src.Mode())
	}
if verbose {
ctx.OutLn("Copied File %s -> %s", source, dest)
}
return
}
func CopyDir(ctx PfCtx, verbose bool, source string, dest string) (err error) {
if verbose {
ctx.OutLn("Copying Directory %s -> %s", source, dest)
}
/* Get properties of source directory */
sourceinfo, err := os.Stat(source)
if err != nil {
return err
}
/* Create destination directory (may already exist) */
err = os.MkdirAll(dest, sourceinfo.Mode())
if err != nil && err != os.ErrExist {
return err
}
dir, err := os.Open(source)
if err != nil {
return err
}
objs, err := dir.Readdir(-1)
if err != nil {
return err
}
for _, obj := range objs {
srcfptr := source + "/" + obj.Name()
dstfptr := dest + "/" + obj.Name()
if obj.IsDir() {
/* Copy sub-directories recursively */
err = CopyDir(ctx, verbose, srcfptr, dstfptr)
if err != nil {
fmt.Println(err)
}
} else {
/* File Copies */
err = CopyFile(ctx, verbose, srcfptr, dstfptr)
if err != nil {
fmt.Println(err)
}
}
}
return
}
func ThisFunc() string {
pc := make([]uintptr, 10)
runtime.Callers(2, pc)
f := runtime.FuncForPC(pc[0])
return f.Name()
}
func TrackStart() time.Time {
return time.Now()
}
func TrackTime(start time.Time, name string) (elapsed time.Duration) {
elapsed = time.Since(start)
DbgA(1, "%s took %s", name, elapsed)
return
}
func Fmt_Time(t time.Time) string {
if t.IsZero() {
return "never"
}
return t.Format(Config.TimeFormat)
}
func ErrIsDisconnect(err error) bool {
neterr, ok := err.(net.Error)
if ok && !neterr.Temporary() || err == io.EOF {
return true
}
return false
}
/* Ensure that a URL ends in a slash */
func URL_EnsureSlash(url string) string {
if len(url) == 0 || url[len(url)-1] != '/' {
url += "/"
}
return url
}
/* Append two parts of a URL together, adding a '/' in the middle where needed */
func URL_Append(url1 string, url2 string) (url string) {
url1 = strings.TrimSpace(url1)
url2 = strings.TrimSpace(url2)
l1 := len(url1)
l2 := len(url2)
if l1 > 0 && url1[l1-1] == '/' {
if l2 > 0 && url2[0] == '/' {
/* Both have a '/' */
url = url1 + url2[1:]
} else {
/* 1 has, 2 not */
url = url1 + url2
}
} else {
if l2 > 0 && url2[0] == '/' {
/* 1 not, 2 has */
url = url1 + url2
} else {
/* Neither have a '/' */
url = url1 + "/" + url2
}
}
return
}
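
// Illustrative behaviour sketch for the URL helpers above (not in the original source):
//
//	URL_EnsureSlash("https://example.org")       // -> "https://example.org/"
//	URL_Append("https://example.org/", "/path")  // -> "https://example.org/path"
//	URL_Append("https://example.org", "path")    // -> "https://example.org/path"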
| [
"\"GOPATH\""
] | [] | [
"GOPATH"
] | [] | ["GOPATH"] | go | 1 | 0 | |
src/rabbitmq-admin/amqp-traffic/internal/confirm_and_run.go | package internal
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"github.com/vito/go-interact/interact"
)
const (
ok = 0
failed = 1
)
func ConfirmAndRun(explanation, success, helper string, commands []string) int {
showExplanation(explanation, helper, commands)
warnIfNotRoot()
carryOn := false
if err := interact.NewInteraction("Continue?").Resolve(&carryOn); err != nil {
fmt.Printf("Error: %s\n", err)
return failed
}
if !carryOn {
fmt.Println("Stopped.")
return failed
}
if runCommands(commands) {
fmt.Printf("\nFailed\n\n")
return failed
}
fmt.Printf("\n%s\n\n", success)
return ok
}
func showExplanation(explanation, helper string, commands []string) {
fmt.Printf("%s:\n\n", explanation)
for _, cmd := range commands {
fmt.Printf(" - %s\n", cmd)
}
fmt.Println("") // For spacing
fmt.Println(helper)
fmt.Println("") // For spacing
}
func runCommands(commands []string) bool {
errors := false
for _, cmd := range commands {
command := exec.Command("/usr/bin/env", strings.Split(cmd, " ")...)
command.Stdout = os.Stdout
command.Stdin = os.Stdin
command.Stderr = os.Stderr
if err := command.Run(); err != nil {
fmt.Printf("Error running command '%s': %s\n", cmd, err)
errors = true
}
}
return errors
}
func warnIfNotRoot() {
if getUid() != 0 {
fmt.Printf("WARNING, this command should be run as the root user!\n\n")
}
}
func getUid() int {
if fakeUid := os.Getenv("FAKE_UID"); fakeUid != "" {
if value, err := strconv.Atoi(fakeUid); err != nil {
panic(err)
} else {
return value
}
}
return os.Getuid()
}
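
// Illustrative usage sketch (hypothetical caller and commands, not part of this package):
//
//	code := ConfirmAndRun(
//		"This will list the current AMQP firewall rules",
//		"Rules listed",
//		"Run this on the RabbitMQ node as root.",
//		[]string{"iptables -L"},
//	)
//	os.Exit(code)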
| [
"\"FAKE_UID\""
] | [] | [
"FAKE_UID"
] | [] | ["FAKE_UID"] | go | 1 | 0 | |
Control-Tower/src/function/create_stackset_lambdav3.py | import json
import logging
import os
# from botocore.vendored import requests
import random
import string
import time
import boto3
import requests
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SUCCESS = "SUCCESS"
FAILED = "FAILED"
def cfnresponse_send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False):
responseUrl = event['ResponseURL']
print(responseUrl)
responseBody = {'Status': responseStatus,
'Reason': 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
'PhysicalResourceId': physicalResourceId or context.log_stream_name, 'StackId': event['StackId'],
'RequestId': event['RequestId'], 'LogicalResourceId': event['LogicalResourceId'], 'NoEcho': noEcho,
'Data': responseData}
json_responseBody = json.dumps(responseBody)
print("Response body:\n" + json_responseBody)
headers = {
'content-type': '',
'content-length': str(len(json_responseBody))
}
try:
response = requests.put(responseUrl,
data=json_responseBody,
headers=headers)
print("Status code: " + response.reason)
except Exception as e:
print("send(..) failed executing requests.put(..): " + str(e))
def get_secret_value(secret):
# key = 'falcon_client_secret'
key = secret
SM = boto3.client('secretsmanager')
secret_list = SM.list_secrets()['SecretList']
output = {}
for s in secret_list:
if key in s.values():
output = SM.get_secret_value(SecretId=key)['SecretString']
return output
def get_account_id(name='Log archive'):
"""Get the Account Id from AWS Organization"""
ORG = boto3.client('organizations')
result = ''
try:
orgList = ORG.list_accounts()['Accounts']
except Exception as e:
raise e
if len(orgList) > 1:
for i in orgList:
if i['Name'] == name:
result = i['Id']
return result
def get_master_id():
""" Get the master Id from AWS Organization - Only on master"""
masterID = ''
ORG = boto3.client('organizations')
try:
masterID = ORG.list_roots()['Roots'][0]['Arn'].rsplit(':')[4]
return masterID
except Exception as e:
logger.error('This stack runs only on the Master of the AWS Organization')
return False
def launch_crwd_discover(templateUrl, paramList, AdminRoleARN, ExecRole, cList, stacketsetName):
""" Launch CRWD Discover Stackset on the Master Account """
CFT = boto3.client('cloudformation')
result = {}
if len(paramList):
try:
result = CFT.create_stack_set(StackSetName=stacketsetName,
Description='Roles for CRWD-Discover',
TemplateURL=templateUrl,
Parameters=paramList,
AdministrationRoleARN=AdminRoleARN,
ExecutionRoleName=ExecRole,
Capabilities=cList)
return result
except ClientError as e:
if e.response['Error']['Code'] == 'NameAlreadyExistsException':
logger.info("StackSet already exists")
result['StackSetName'] = 'CRWD-ROLES-CREATION'
return result
else:
logger.error("Unexpected error: %s" % e)
result['Status'] = e
return result
def get_random_alphanum_string(stringLength=15):
lettersAndDigits = string.ascii_letters + string.digits
return ''.join((random.choice(lettersAndDigits) for i in range(stringLength)))
def delete_stackset(stacksetName):
CFT = boto3.client('cloudformation')
try:
stackset_result = CFT.describe_stack_set(StackSetName=stacksetName)
if stackset_result and 'StackSet' in stackset_result:
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
while 'NextToken' in stackset_instances:
stackinstancesnexttoken = stackset_instances['NextToken']
morestackinstances = CFT.list_stack_instances(NextToken=stackinstancesnexttoken)
stackset_instances["Summaries"].extend(morestackinstances["Summaries"])
if len(stackset_instances["Summaries"]) > 0:
stack_instance_members = [x["Account"] for x in stackset_instances["Summaries"]]
stack_instance_regions = list(set(x["Region"] for x in stackset_instances["Summaries"]))
CFT.delete_stack_instances(
StackSetName=stacksetName,
Accounts=stack_instance_members,
Regions=stack_instance_regions,
OperationPreferences={'MaxConcurrentCount': 3},
RetainStacks=False
)
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
counter = 2
while len(stackset_instances["Summaries"]) > 0 and counter > 0:
logger.info("Deleting stackset instance from {}, remaining {}, "
"sleeping for 10 sec".format(stacksetName, len(stackset_instances["Summaries"])))
time.sleep(10)
counter = counter - 1
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
if counter > 0:
CFT.delete_stack_set(StackSetName=stacksetName)
logger.info("StackSet {} deleted".format(stacksetName))
else:
logger.info("StackSet {} still has stackset instance, skipping".format(stacksetName))
return True
except ClientError as e:
if e.response['Error']['Code'] == 'StackSetNotFoundException':
logger.info("StackSet {} does not exist".format(stacksetName))
return True
else:
logger.error("Unexpected error: %s" % e)
return False
def lambda_handler(event, context):
try:
STACKSETNAME = 'CrowdstrikeDiscover-IAM-ROLES'
AwsRegion = os.environ['AwsRegion']
RoleName = os.environ['RoleName']
CSAccountNumber = os.environ['CSAccountNumber']
CSAssumingRoleName = os.environ['CSAssumingRoleName']
LogArchiveBucketRegion = os.environ['LogArchiveBucketRegion']
LogArchiveAccount = os.environ['LogArchiveAccount']
CredentialsSecret = os.environ['CrowdstrikeCredentialsSecret']
RoleCreationDelayTimer = os.environ['RoleCreationDelayTimer']
#
# Moved to virtual hosted-style URLs.
# See https://aws.amazon.com/fr/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/
        # path-style URLs are to be deprecated
#
CrowdstrikeTemplateUrl = f'https://crowdstrike-sa-resources-ct-{AwsRegion}.s3.amazonaws.com/ct_crowdstrike_stacksetv3.yaml'
AccountId = get_master_id()
cList = ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']
ExecRole = 'AWSControlTowerExecution'
AdminRoleARN = 'arn:aws:iam::' + AccountId + ':role/service-role/AWSControlTowerStackSetRole'
logger.info('EVENT Received: {}'.format(event))
response_data = {}
if event['RequestType'] in ['Create']:
logger.info('Event = ' + event['RequestType'])
# Parameters for CRWD-Discover stackset
CRWD_Discover_paramList = []
secretList = json.loads(get_secret_value(CredentialsSecret))
keyDict = {}
# LocalAccount:
for s in secretList.keys():
keyDict = {'ParameterKey': s, 'ParameterValue': secretList[s]}
CRWD_Discover_paramList.append(dict(keyDict))
ExternalID = get_random_alphanum_string(8)
keyDict['ParameterKey'] = 'ExternalID'
keyDict['ParameterValue'] = ExternalID
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'RoleCreationDelayTimer'
keyDict['ParameterValue'] = RoleCreationDelayTimer
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'RoleName'
keyDict['ParameterValue'] = RoleName
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'CSAccountNumber'
keyDict['ParameterValue'] = CSAccountNumber
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'CSAssumingRoleName'
keyDict['ParameterValue'] = CSAssumingRoleName
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'LogArchiveBucketRegion'
keyDict['ParameterValue'] = LogArchiveBucketRegion
CRWD_Discover_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'LogArchiveAccount'
keyDict['ParameterValue'] = LogArchiveAccount
CRWD_Discover_paramList.append(dict(keyDict))
logger.info('CRWD_Discover ParamList:{}'.format(CRWD_Discover_paramList))
logger.info('AdminRoleARN: {}'.format(AdminRoleARN))
logger.info('CrowdstrikeTemplateUrl: {}'.format(CrowdstrikeTemplateUrl))
logger.info('ExecRole: {}'.format(ExecRole))
logger.info('ExecRole: {}'.format(cList))
CRWD_Discover_result = launch_crwd_discover(CrowdstrikeTemplateUrl, CRWD_Discover_paramList, AdminRoleARN,
ExecRole, cList, STACKSETNAME)
logger.info('CRWD-Discover Stackset: {}'.format(CRWD_Discover_result))
if CRWD_Discover_result:
cfnresponse_send(event, context, SUCCESS, CRWD_Discover_result, "CustomResourcePhysicalID")
return
else:
cfnresponse_send(event, context, FAILED, CRWD_Discover_result, "CustomResourcePhysicalID")
return
elif event['RequestType'] in ['Update']:
logger.info('Event = ' + event['RequestType'])
cfnresponse_send(event, context, 'SUCCESS', response_data, "CustomResourcePhysicalID")
return
elif event['RequestType'] in ['Delete']:
logger.info('Event = ' + event['RequestType'])
delete_stackset(STACKSETNAME)
response_data["Status"] = "Success"
cfnresponse_send(event, context, 'SUCCESS', response_data, "CustomResourcePhysicalID")
return
raise Exception
except Exception as e:
logger.error(e)
response_data = {"Status": str(e)}
cfnresponse_send(event, context, 'FAILED', response_data, "CustomResourcePhysicalID")
return
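

# Illustrative sketch of the CloudFormation custom-resource event this handler expects
# (field values are made up; not part of the deployed function):
#
#   example_event = {
#       "RequestType": "Create",
#       "ResponseURL": "https://cloudformation-custom-resource-response.example/presigned",
#       "StackId": "arn:aws:cloudformation:us-east-1:111111111111:stack/example/guid",
#       "RequestId": "00000000-0000-0000-0000-000000000000",
#       "LogicalResourceId": "CrowdstrikeStackSetTrigger",
#   }
#   # lambda_handler(example_event, context) would then create the stack set.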
| [] | [] | [
"CrowdstrikeCredentialsSecret",
"LogArchiveBucketRegion",
"CSAccountNumber",
"RoleName",
"LogArchiveAccount",
"RoleCreationDelayTimer",
"CSAssumingRoleName",
"AwsRegion"
] | [] | ["CrowdstrikeCredentialsSecret", "LogArchiveBucketRegion", "CSAccountNumber", "RoleName", "LogArchiveAccount", "RoleCreationDelayTimer", "CSAssumingRoleName", "AwsRegion"] | python | 8 | 0 | |
GooglePageInsights/src/googlepageinsights/main.go | package main
import (
"googlepageinsights/lib"
"log"
"os"
"github.com/puzanov/mongostorage"
)
func main() {
api := lib.NewAPI(os.Getenv("KEY"))
speed, json, err := api.GetPageInsights(os.Getenv("URL"), os.Getenv("STRATEGY"))
if err != nil {
log.Print("Error getting json from PageInsights")
log.Fatal(err)
}
storage := mongostorage.NewItem()
storage.Name = os.Getenv("URL") + " " + os.Getenv("STRATEGY")
storage.Collection = "pageinsights"
storage.Value = speed
storage.JSON = json
err = storage.Save()
if err != nil {
log.Print("Error saving PageInsights data into MongoDB")
log.Fatal(err)
}
}
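
// Illustrative run sketch (assumed environment, not part of the source):
//
//	KEY=<PageSpeed API key> URL=https://example.org STRATEGY=mobile ./googlepageinsights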
| [
"\"KEY\"",
"\"URL\"",
"\"STRATEGY\"",
"\"URL\"",
"\"STRATEGY\""
] | [] | [
"URL",
"STRATEGY",
"KEY"
] | [] | ["URL", "STRATEGY", "KEY"] | go | 3 | 0 | |
push_message/push.py | import os
from linebot.models import TextSendMessage
if os.getenv('DEVELOPMENT') is not None:
from dotenv import load_dotenv
load_dotenv(dotenv_path='../.env')
import sys
from linebot import LineBotApi
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN') or 'YOUR_ACCESS_TOKEN'
if channel_access_token == 'YOUR_ACCESS_TOKEN':
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
# Example: https://github.com/line/line-bot-sdk-python#push_messageself-to-messages-notification_disabledfalse-timeoutnone
# Document: https://developers.line.biz/en/reference/messaging-api/#send-push-message
to = '' # Fill the USER_ID
line_bot_api.push_message(to, TextSendMessage(text='Hello this is push message test!'))
| [] | [] | [
"DEVELOPMENT",
"LINE_CHANNEL_ACCESS_TOKEN"
] | [] | ["DEVELOPMENT", "LINE_CHANNEL_ACCESS_TOKEN"] | python | 2 | 0 | |
spanner/client_test.go | /*
Copyright 2017 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"context"
"fmt"
"io"
"math/big"
"os"
"strings"
"testing"
"time"
"cloud.google.com/go/civil"
itestutil "cloud.google.com/go/internal/testutil"
vkit "cloud.google.com/go/spanner/apiv1"
. "cloud.google.com/go/spanner/internal/testutil"
structpb "github.com/golang/protobuf/ptypes/struct"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
sppb "google.golang.org/genproto/googleapis/spanner/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func setupMockedTestServer(t *testing.T) (server *MockedSpannerInMemTestServer, client *Client, teardown func()) {
return setupMockedTestServerWithConfig(t, ClientConfig{})
}
func setupMockedTestServerWithConfig(t *testing.T, config ClientConfig) (server *MockedSpannerInMemTestServer, client *Client, teardown func()) {
return setupMockedTestServerWithConfigAndClientOptions(t, config, []option.ClientOption{})
}
func setupMockedTestServerWithConfigAndClientOptions(t *testing.T, config ClientConfig, clientOptions []option.ClientOption) (server *MockedSpannerInMemTestServer, client *Client, teardown func()) {
grpcHeaderChecker := &itestutil.HeadersEnforcer{
OnFailure: t.Fatalf,
Checkers: []*itestutil.HeaderChecker{
{
Key: "x-goog-api-client",
ValuesValidator: func(token ...string) error {
if len(token) != 1 {
return status.Errorf(codes.Internal, "unexpected number of api client token headers: %v", len(token))
}
if !strings.HasPrefix(token[0], "gl-go/") {
return status.Errorf(codes.Internal, "unexpected api client token: %v", token[0])
}
if !strings.Contains(token[0], "gccl/") {
return status.Errorf(codes.Internal, "unexpected api client token: %v", token[0])
}
return nil
},
},
},
}
clientOptions = append(clientOptions, grpcHeaderChecker.CallOptions()...)
server, opts, serverTeardown := NewMockedSpannerInMemTestServer(t)
opts = append(opts, clientOptions...)
ctx := context.Background()
formattedDatabase := fmt.Sprintf("projects/%s/instances/%s/databases/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]")
client, err := NewClientWithConfig(ctx, formattedDatabase, config, opts...)
if err != nil {
t.Fatal(err)
}
return server, client, func() {
client.Close()
serverTeardown()
}
}
// Test validDatabaseName()
func TestValidDatabaseName(t *testing.T) {
validDbURI := "projects/spanner-cloud-test/instances/foo/databases/foodb"
invalidDbUris := []string{
// Completely wrong DB URI.
"foobarDB",
// Project ID contains "/".
"projects/spanner-cloud/test/instances/foo/databases/foodb",
// No instance ID.
"projects/spanner-cloud-test/instances//databases/foodb",
}
if err := validDatabaseName(validDbURI); err != nil {
t.Errorf("validateDatabaseName(%q) = %v, want nil", validDbURI, err)
}
for _, d := range invalidDbUris {
if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) {
t.Errorf("validateDatabaseName(%q) = %q, want error pattern %q", validDbURI, err, wantErr)
}
}
}
func TestReadOnlyTransactionClose(t *testing.T) {
// Closing a ReadOnlyTransaction shouldn't panic.
c := &Client{}
tx := c.ReadOnlyTransaction()
tx.Close()
}
func TestClient_Single(t *testing.T) {
t.Parallel()
err := testSingleQuery(t, nil)
if err != nil {
t.Fatal(err)
}
}
func TestClient_Single_Unavailable(t *testing.T) {
t.Parallel()
err := testSingleQuery(t, status.Error(codes.Unavailable, "Temporary unavailable"))
if err != nil {
t.Fatal(err)
}
}
func TestClient_Single_InvalidArgument(t *testing.T) {
t.Parallel()
err := testSingleQuery(t, status.Error(codes.InvalidArgument, "Invalid argument"))
if status.Code(err) != codes.InvalidArgument {
t.Fatalf("got: %v, want: %v", err, codes.InvalidArgument)
}
}
func TestClient_Single_SessionNotFound(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodExecuteStreamingSql,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
ctx := context.Background()
iter := client.Single().Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
t.Fatalf("row count mismatch\nGot: %v\nWant: %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
}
func TestClient_Single_Read_SessionNotFound(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodStreamingRead,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
ctx := context.Background()
iter := client.Single().Read(ctx, "Albums", KeySets(Key{"foo"}), []string{"SingerId", "AlbumId", "AlbumTitle"})
defer iter.Stop()
rowCount := int64(0)
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
t.Fatalf("row count mismatch\nGot: %v\nWant: %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
}
func TestClient_Single_ReadRow_SessionNotFound(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodStreamingRead,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
ctx := context.Background()
row, err := client.Single().ReadRow(ctx, "Albums", Key{"foo"}, []string{"SingerId", "AlbumId", "AlbumTitle"})
if err != nil {
t.Fatalf("Unexpected error for read row: %v", err)
}
if row == nil {
t.Fatal("ReadRow did not return a row")
}
}
func TestClient_Single_RetryableErrorOnPartialResultSet(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
// Add two errors that will be returned by the mock server when the client
// is trying to fetch a partial result set. Both errors are retryable.
// The errors are not 'sticky' on the mocked server, i.e. once the error
// has been returned once, the next call for the same partial result set
// will succeed.
// When the client is fetching the partial result set with resume token 2,
// the mock server will respond with an internal error with the message
// 'stream terminated by RST_STREAM'. The client will retry the call to get
// this partial result set.
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(2),
Err: status.Errorf(codes.Internal, "stream terminated by RST_STREAM"),
},
)
// When the client is fetching the partial result set with resume token 3,
// the mock server will respond with a 'Unavailable' error. The client will
// retry the call to get this partial result set.
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(3),
Err: status.Errorf(codes.Unavailable, "server is unavailable"),
},
)
ctx := context.Background()
if err := executeSingerQuery(ctx, client.Single()); err != nil {
t.Fatal(err)
}
}
func TestClient_Single_NonRetryableErrorOnPartialResultSet(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
// Add two errors that will be returned by the mock server when the client
// is trying to fetch a partial result set. The first error is retryable,
// the second is not.
// This error will automatically be retried.
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(2),
Err: status.Errorf(codes.Internal, "stream terminated by RST_STREAM"),
},
)
// 'Session not found' is not retryable and the error will be returned to
// the user.
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(3),
Err: newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s"),
},
)
ctx := context.Background()
err := executeSingerQuery(ctx, client.Single())
if status.Code(err) != codes.NotFound {
t.Fatalf("Error mismatch:\ngot: %v\nwant: %v", err, codes.NotFound)
}
}
func TestClient_Single_NonRetryableInternalErrors(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(2),
Err: status.Errorf(codes.Internal, "grpc: error while marshaling: string field contains invalid UTF-8"),
},
)
ctx := context.Background()
err := executeSingerQuery(ctx, client.Single())
if status.Code(err) != codes.Internal {
t.Fatalf("Error mismatch:\ngot: %v\nwant: %v", err, codes.Internal)
}
}
func TestClient_Single_DeadlineExceeded_NoErrors(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(MethodExecuteStreamingSql,
SimulatedExecutionTime{
MinimumExecutionTime: 50 * time.Millisecond,
})
ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(5*time.Millisecond))
defer cancel()
err := executeSingerQuery(ctx, client.Single())
if status.Code(err) != codes.DeadlineExceeded {
t.Fatalf("Error mismatch:\ngot: %v\nwant: %v", err, codes.DeadlineExceeded)
}
}
func TestClient_Single_DeadlineExceeded_WithErrors(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(2),
Err: status.Errorf(codes.Internal, "stream terminated by RST_STREAM"),
},
)
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(3),
Err: status.Errorf(codes.Unavailable, "server is unavailable"),
ExecutionTime: 50 * time.Millisecond,
},
)
ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(25*time.Millisecond))
defer cancel()
err := executeSingerQuery(ctx, client.Single())
if status.Code(err) != codes.DeadlineExceeded {
t.Fatalf("got unexpected error %v, expected DeadlineExceeded", err)
}
}
func TestClient_Single_ContextCanceled_noDeclaredServerErrors(t *testing.T) {
t.Parallel()
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
cancel()
err := executeSingerQuery(ctx, client.Single())
if status.Code(err) != codes.Canceled {
t.Fatalf("got unexpected error %v, expected Canceled", err)
}
}
func TestClient_Single_ContextCanceled_withDeclaredServerErrors(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(2),
Err: status.Errorf(codes.Internal, "stream terminated by RST_STREAM"),
},
)
server.TestSpanner.AddPartialResultSetError(
SelectSingerIDAlbumIDAlbumTitleFromAlbums,
PartialResultSetExecutionTime{
ResumeToken: EncodeResumeToken(3),
Err: status.Errorf(codes.Unavailable, "server is unavailable"),
},
)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
f := func(rowCount int64) error {
if rowCount == 2 {
cancel()
}
return nil
}
iter := client.Single().Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
err := executeSingerQueryWithRowFunc(ctx, client.Single(), f)
if status.Code(err) != codes.Canceled {
t.Fatalf("got unexpected error %v, expected Canceled", err)
}
}
func TestClient_Single_QueryOptions(t *testing.T) {
for _, tt := range queryOptionsTestCases() {
t.Run(tt.name, func(t *testing.T) {
if tt.env.Options != nil {
unset := setQueryOptionsEnvVars(tt.env.Options)
defer unset()
}
ctx := context.Background()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: tt.client})
defer teardown()
var iter *RowIterator
if tt.query.Options == nil {
iter = client.Single().Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
} else {
iter = client.Single().QueryWithOptions(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), tt.query)
}
testQueryOptions(t, iter, server.TestSpanner, tt.want)
})
}
}
func TestClient_ReturnDatabaseName(t *testing.T) {
t.Parallel()
_, client, teardown := setupMockedTestServer(t)
defer teardown()
got := client.DatabaseName()
want := "projects/[PROJECT]/instances/[INSTANCE]/databases/[DATABASE]"
if got != want {
t.Fatalf("Incorrect database name returned, got: %s, want: %s", got, want)
}
}
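// testQueryOptions reads a single row from iter and then verifies that the
// ExecuteSqlRequest received by the mock server carried the expected query
// options.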
func testQueryOptions(t *testing.T, iter *RowIterator, server InMemSpannerServer, qo QueryOptions) {
defer iter.Stop()
_, err := iter.Next()
if err != nil {
t.Fatalf("Failed to read from the iterator: %v", err)
}
checkReqsForQueryOptions(t, server, qo)
}
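// checkReqsForQueryOptions drains the requests received by the mock server,
// asserts that exactly one ExecuteSqlRequest was sent, and compares its
// optimizer version and statistics package with the expected query options.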
func checkReqsForQueryOptions(t *testing.T, server InMemSpannerServer, qo QueryOptions) {
reqs := drainRequestsFromServer(server)
sqlReqs := []*sppb.ExecuteSqlRequest{}
for _, req := range reqs {
if sqlReq, ok := req.(*sppb.ExecuteSqlRequest); ok {
sqlReqs = append(sqlReqs, sqlReq)
}
}
if got, want := len(sqlReqs), 1; got != want {
t.Fatalf("Length mismatch, got %v, want %v", got, want)
}
reqQueryOptions := sqlReqs[0].QueryOptions
if got, want := reqQueryOptions.OptimizerVersion, qo.Options.OptimizerVersion; got != want {
t.Fatalf("Optimizer version mismatch, got %v, want %v", got, want)
}
if got, want := reqQueryOptions.OptimizerStatisticsPackage, qo.Options.OptimizerStatisticsPackage; got != want {
t.Fatalf("Optimizer statistics package mismatch, got %v, want %v", got, want)
}
}
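// testSingleQuery executes the singers query on a single-use read-only
// transaction against a mocked server, optionally instructing the server to
// return the given error.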
func testSingleQuery(t *testing.T, serverError error) error {
ctx := context.Background()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
if serverError != nil {
server.TestSpanner.SetError(serverError)
}
return executeSingerQuery(ctx, client.Single())
}
func executeSingerQuery(ctx context.Context, tx *ReadOnlyTransaction) error {
return executeSingerQueryWithRowFunc(ctx, tx, nil)
}
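// executeSingerQueryWithRowFunc runs the singers query on the given
// transaction, scans every row, invokes f (if non-nil) after each row, and
// verifies that the expected number of rows was returned.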
func executeSingerQueryWithRowFunc(ctx context.Context, tx *ReadOnlyTransaction, f func(rowCount int64) error) error {
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
return err
}
rowCount++
if f != nil {
if err := f(rowCount); err != nil {
return err
}
}
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
return status.Errorf(codes.Internal, "Row count mismatch, got %v, expected %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
return nil
}
func createSimulatedExecutionTimeWithTwoUnavailableErrors(method string) map[string]SimulatedExecutionTime {
errors := make([]error, 2)
errors[0] = status.Error(codes.Unavailable, "Temporary unavailable")
errors[1] = status.Error(codes.Unavailable, "Temporary unavailable")
executionTimes := make(map[string]SimulatedExecutionTime)
executionTimes[method] = SimulatedExecutionTime{
Errors: errors,
}
return executionTimes
}
func TestClient_ReadOnlyTransaction(t *testing.T) {
t.Parallel()
if err := testReadOnlyTransaction(t, make(map[string]SimulatedExecutionTime)); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_UnavailableOnSessionCreate(t *testing.T) {
t.Parallel()
if err := testReadOnlyTransaction(t, createSimulatedExecutionTimeWithTwoUnavailableErrors(MethodCreateSession)); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_UnavailableOnBeginTransaction(t *testing.T) {
t.Parallel()
if err := testReadOnlyTransaction(t, createSimulatedExecutionTimeWithTwoUnavailableErrors(MethodBeginTransaction)); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_UnavailableOnExecuteStreamingSql(t *testing.T) {
t.Parallel()
if err := testReadOnlyTransaction(t, createSimulatedExecutionTimeWithTwoUnavailableErrors(MethodExecuteStreamingSql)); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_SessionNotFoundOnExecuteStreamingSql(t *testing.T) {
t.Parallel()
// Session not found is not retryable for a query on a multi-use read-only
// transaction, as we would need to start a new transaction on a new
// session.
err := testReadOnlyTransaction(t, map[string]SimulatedExecutionTime{
MethodExecuteStreamingSql: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
})
want := ToSpannerError(newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s"))
if err == nil {
t.Fatalf("missing expected error\nGot: nil\nWant: %v", want)
}
if status.Code(err) != status.Code(want) || !strings.Contains(err.Error(), want.Error()) {
t.Fatalf("error mismatch\nGot: %v\nWant: %v", err, want)
}
}
func TestClient_ReadOnlyTransaction_UnavailableOnCreateSessionAndBeginTransaction(t *testing.T) {
t.Parallel()
exec := map[string]SimulatedExecutionTime{
MethodCreateSession: {Errors: []error{status.Error(codes.Unavailable, "Temporary unavailable")}},
MethodBeginTransaction: {Errors: []error{status.Error(codes.Unavailable, "Temporary unavailable")}},
}
if err := testReadOnlyTransaction(t, exec); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_UnavailableOnCreateSessionAndInvalidArgumentOnBeginTransaction(t *testing.T) {
t.Parallel()
exec := map[string]SimulatedExecutionTime{
MethodCreateSession: {Errors: []error{status.Error(codes.Unavailable, "Temporary unavailable")}},
MethodBeginTransaction: {Errors: []error{status.Error(codes.InvalidArgument, "Invalid argument")}},
}
if err := testReadOnlyTransaction(t, exec); err == nil {
t.Fatalf("Missing expected exception")
} else if status.Code(err) != codes.InvalidArgument {
t.Fatalf("Got unexpected exception: %v", err)
}
}
func TestClient_ReadOnlyTransaction_SessionNotFoundOnBeginTransaction(t *testing.T) {
t.Parallel()
if err := testReadOnlyTransaction(
t,
map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
},
); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_SessionNotFoundOnBeginTransaction_WithMaxOneSession(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(
t,
ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 0,
MaxOpened: 1,
},
})
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodBeginTransaction,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
tx := client.ReadOnlyTransaction()
defer tx.Close()
ctx := context.Background()
if err := executeSingerQuery(ctx, tx); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadOnlyTransaction_QueryOptions(t *testing.T) {
for _, tt := range queryOptionsTestCases() {
t.Run(tt.name, func(t *testing.T) {
if tt.env.Options != nil {
unset := setQueryOptionsEnvVars(tt.env.Options)
defer unset()
}
ctx := context.Background()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: tt.client})
defer teardown()
tx := client.ReadOnlyTransaction()
defer tx.Close()
var iter *RowIterator
if tt.query.Options == nil {
iter = tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
} else {
iter = tx.QueryWithOptions(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), tt.query)
}
testQueryOptions(t, iter, server.TestSpanner, tt.want)
})
}
}
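// setQueryOptionsEnvVars sets the query options environment variables and
// returns a function that resets them to empty values.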
func setQueryOptionsEnvVars(opts *sppb.ExecuteSqlRequest_QueryOptions) func() {
os.Setenv("SPANNER_OPTIMIZER_VERSION", opts.OptimizerVersion)
os.Setenv("SPANNER_OPTIMIZER_STATISTICS_PACKAGE", opts.OptimizerStatisticsPackage)
return func() {
defer os.Setenv("SPANNER_OPTIMIZER_VERSION", "")
defer os.Setenv("SPANNER_OPTIMIZER_STATISTICS_PACKAGE", "")
}
}
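// testReadOnlyTransaction executes the singers query on a multi-use read-only
// transaction while the mock server simulates the given execution times.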
func testReadOnlyTransaction(t *testing.T, executionTimes map[string]SimulatedExecutionTime) error {
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for method, exec := range executionTimes {
server.TestSpanner.PutExecutionTime(method, exec)
}
tx := client.ReadOnlyTransaction()
defer tx.Close()
ctx := context.Background()
return executeSingerQuery(ctx, tx)
}
func TestClient_ReadWriteTransaction(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, make(map[string]SimulatedExecutionTime), 1); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransactionCommitAborted(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodCommitTransaction: {Errors: []error{status.Error(codes.Aborted, "Transaction aborted")}},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnCommit(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodCommitTransaction: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnBeginTransaction(t *testing.T) {
t.Parallel()
// We expect only 1 attempt, as the 'Session not found' error is already
// handled in the session pool where the session is prepared.
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
}, 1); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnBeginTransactionWithEmptySessionPool(t *testing.T) {
t.Parallel()
// There will be no prepared sessions in the pool, so the error will occur
// when the transaction tries to get a session from the pool. This will
// also be handled by the session pool, so the transaction itself does not
// need to retry, hence the expectedAttempts == 1.
if err := testReadWriteTransactionWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.0},
}, map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
}, 1); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnExecuteStreamingSql(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodExecuteStreamingSql: {Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnExecuteUpdate(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodExecuteSql,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
ctx := context.Background()
var attempts int
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
rowCount, err := tx.Update(ctx, NewStatement(UpdateBarSetFoo))
if err != nil {
return err
}
if g, w := rowCount, int64(UpdateBarSetFooRowCount); g != w {
return status.Errorf(codes.FailedPrecondition, "Row count mismatch\nGot: %v\nWant: %v", g, w)
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("number of attempts mismatch:\nGot%d\nWant:%d", g, w)
}
}
func TestClient_ReadWriteTransaction_SessionNotFoundOnExecuteBatchUpdate(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(
MethodExecuteBatchDml,
SimulatedExecutionTime{Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")}},
)
ctx := context.Background()
var attempts int
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
rowCounts, err := tx.BatchUpdate(ctx, []Statement{NewStatement(UpdateBarSetFoo)})
if err != nil {
return err
}
if g, w := len(rowCounts), 1; g != w {
return status.Errorf(codes.FailedPrecondition, "Row counts length mismatch\nGot: %v\nWant: %v", g, w)
}
if g, w := rowCounts[0], int64(UpdateBarSetFooRowCount); g != w {
return status.Errorf(codes.FailedPrecondition, "Row count mismatch\nGot: %v\nWant: %v", g, w)
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("number of attempts mismatch:\nGot%d\nWant:%d", g, w)
}
}
func TestClient_ReadWriteTransaction_Query_QueryOptions(t *testing.T) {
for _, tt := range queryOptionsTestCases() {
t.Run(tt.name, func(t *testing.T) {
if tt.env.Options != nil {
unset := setQueryOptionsEnvVars(tt.env.Options)
defer unset()
}
ctx := context.Background()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: tt.client})
defer teardown()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
var iter *RowIterator
if tt.query.Options == nil {
iter = tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
} else {
iter = tx.QueryWithOptions(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), tt.query)
}
testQueryOptions(t, iter, server.TestSpanner, tt.want)
return nil
})
if err != nil {
t.Fatal(err)
}
})
}
}
func TestClient_ReadWriteTransaction_Update_QueryOptions(t *testing.T) {
for _, tt := range queryOptionsTestCases() {
t.Run(tt.name, func(t *testing.T) {
if tt.env.Options != nil {
unset := setQueryOptionsEnvVars(tt.env.Options)
defer unset()
}
ctx := context.Background()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: tt.client})
defer teardown()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
var rowCount int64
var err error
if tt.query.Options == nil {
rowCount, err = tx.Update(ctx, NewStatement(UpdateBarSetFoo))
} else {
rowCount, err = tx.UpdateWithOptions(ctx, NewStatement(UpdateBarSetFoo), tt.query)
}
if got, want := rowCount, int64(5); got != want {
t.Fatalf("Incorrect updated row count: got %v, want %v", got, want)
}
return err
})
if err != nil {
t.Fatalf("Failed to update rows: %v", err)
}
checkReqsForQueryOptions(t, server.TestSpanner, tt.want)
})
}
}
func TestClient_ReadWriteTransactionWithOptions(t *testing.T) {
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
resp, err := client.ReadWriteTransactionWithOptions(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
return err
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
return status.Errorf(codes.FailedPrecondition, "Row count mismatch, got %v, expected %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
return nil
}, TransactionOptions{CommitOptions: CommitOptions{ReturnCommitStats: true}})
if err != nil {
t.Fatalf("Failed to execute the transaction: %s", err)
}
if got, want := resp.CommitStats.MutationCount, int64(1); got != want {
t.Fatalf("Mismatch mutation count - got: %d, want: %d", got, want)
}
}
func TestClient_ReadWriteStmtBasedTransactionWithOptions(t *testing.T) {
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
tx, err := NewReadWriteStmtBasedTransactionWithOptions(
ctx,
client,
TransactionOptions{CommitOptions: CommitOptions{ReturnCommitStats: true}})
if err != nil {
t.Fatalf("Unexpected error when creating transaction: %v", err)
}
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatalf("Unexpected error when fetching query results: %v", err)
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
t.Fatalf("Unexpected error when getting query data: %v", err)
}
rowCount++
}
resp, err := tx.CommitWithReturnResp(ctx)
if err != nil {
t.Fatalf("Unexpected error when committing transaction: %v", err)
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
t.Errorf("Row count mismatch, got %v, expected %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
if got, want := resp.CommitStats.MutationCount, int64(1); got != want {
t.Fatalf("Mismatch mutation count - got: %d, want: %d", got, want)
}
}
func TestClient_ReadWriteTransaction_DoNotLeakSessionOnPanic(t *testing.T) {
// Make sure that there is always only one session in the pool.
sc := SessionPoolConfig{
MinOpened: 1,
MaxOpened: 1,
}
_, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{SessionPoolConfig: sc})
defer teardown()
ctx := context.Background()
// If a panic occurs during a transaction, the session will not leak.
func() {
defer func() { recover() }()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
panic("cause panic")
return nil
})
if err != nil {
t.Fatalf("Unexpected error during transaction: %v", err)
}
}()
if g, w := client.idleSessions.idleList.Len(), 1; g != w {
t.Fatalf("idle session count mismatch.\nGot: %v\nWant: %v", g, w)
}
}
func TestClient_SessionNotFound(t *testing.T) {
// Ensure we always have at least one session in the pool.
sc := SessionPoolConfig{
MinOpened: 1,
}
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{SessionPoolConfig: sc})
defer teardown()
ctx := context.Background()
for {
client.idleSessions.mu.Lock()
numSessions := client.idleSessions.idleList.Len()
client.idleSessions.mu.Unlock()
if numSessions > 0 {
break
}
<-time.After(time.Millisecond)
}
// Remove the session from the server without the pool knowing it.
_, err := server.TestSpanner.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: client.idleSessions.idleList.Front().Value.(*session).id})
if err != nil {
t.Fatalf("Failed to delete session unexpectedly: %v", err)
}
_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
return err
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
return spannerErrorf(codes.FailedPrecondition, "Row count mismatch, got %v, expected %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
return nil
})
if err != nil {
t.Fatalf("Unexpected error during transaction: %v", err)
}
}
func TestClient_ReadWriteTransactionExecuteStreamingSqlAborted(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodExecuteStreamingSql: {Errors: []error{status.Error(codes.Aborted, "Transaction aborted")}},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_UnavailableOnBeginTransaction(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{status.Error(codes.Unavailable, "Unavailable")}},
}, 1); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_UnavailableOnBeginAndAbortOnCommit(t *testing.T) {
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{status.Error(codes.Unavailable, "Unavailable")}},
MethodCommitTransaction: {Errors: []error{status.Error(codes.Aborted, "Aborted")}},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_UnavailableOnExecuteStreamingSql(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodExecuteStreamingSql: {Errors: []error{status.Error(codes.Unavailable, "Unavailable")}},
}, 1); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_UnavailableOnBeginAndExecuteStreamingSqlAndTwiceAbortOnCommit(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodBeginTransaction: {Errors: []error{status.Error(codes.Unavailable, "Unavailable")}},
MethodExecuteStreamingSql: {Errors: []error{status.Error(codes.Unavailable, "Unavailable")}},
MethodCommitTransaction: {Errors: []error{status.Error(codes.Aborted, "Aborted"), status.Error(codes.Aborted, "Aborted")}},
}, 3); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransaction_CommitAborted(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
server.TestSpanner.PutExecutionTime(MethodCommitTransaction, SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Aborted")},
})
defer teardown()
ctx := context.Background()
attempts := 0
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
_, err := tx.Update(ctx, Statement{SQL: UpdateBarSetFoo})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("attempt count mismatch:\nWant: %v\nGot: %v", w, g)
}
}
func TestClient_ReadWriteTransaction_DMLAborted(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
server.TestSpanner.PutExecutionTime(MethodExecuteSql, SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Aborted")},
})
defer teardown()
ctx := context.Background()
attempts := 0
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
_, err := tx.Update(ctx, Statement{SQL: UpdateBarSetFoo})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("attempt count mismatch:\nWant: %v\nGot: %v", w, g)
}
}
func TestClient_ReadWriteTransaction_BatchDMLAborted(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
server.TestSpanner.PutExecutionTime(MethodExecuteBatchDml, SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Aborted")},
})
defer teardown()
ctx := context.Background()
attempts := 0
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
_, err := tx.BatchUpdate(ctx, []Statement{{SQL: UpdateBarSetFoo}})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("attempt count mismatch:\nWant: %v\nGot: %v", w, g)
}
}
func TestClient_ReadWriteTransaction_BatchDMLAbortedHalfway(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
abortedStatement := "UPDATE FOO_ABORTED SET BAR=1 WHERE ID=2"
server.TestSpanner.PutStatementResult(
abortedStatement,
&StatementResult{
Type: StatementResultError,
Err: status.Error(codes.Aborted, "Statement was aborted"),
},
)
ctx := context.Background()
var updateCounts []int64
attempts := 0
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
if attempts > 1 {
// Replace the aborted result with a real result to prevent the
// transaction from aborting indefinitely.
server.TestSpanner.PutStatementResult(
abortedStatement,
&StatementResult{
Type: StatementResultUpdateCount,
UpdateCount: 3,
},
)
}
var err error
updateCounts, err = tx.BatchUpdate(ctx, []Statement{{SQL: abortedStatement}, {SQL: UpdateBarSetFoo}})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("attempt count mismatch:\nWant: %v\nGot: %v", w, g)
}
if g, w := updateCounts, []int64{3, UpdateBarSetFooRowCount}; !testEqual(w, g) {
t.Fatalf("update count mismatch\nWant: %v\nGot: %v", w, g)
}
}
func TestClient_ReadWriteTransaction_QueryAborted(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
server.TestSpanner.PutExecutionTime(MethodExecuteStreamingSql, SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Aborted")},
})
defer teardown()
ctx := context.Background()
attempts := 0
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
iter := tx.Query(ctx, Statement{SQL: SelectFooFromBar})
defer iter.Stop()
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
if g, w := attempts, 2; g != w {
t.Fatalf("attempt count mismatch:\nWant: %v\nGot: %v", w, g)
}
}
func TestClient_ReadWriteTransaction_AbortedOnExecuteStreamingSqlAndCommit(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodExecuteStreamingSql: {Errors: []error{status.Error(codes.Aborted, "Aborted")}},
MethodCommitTransaction: {Errors: []error{status.Error(codes.Aborted, "Aborted"), status.Error(codes.Aborted, "Aborted")}},
}, 4); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransactionCommitAbortedAndUnavailable(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodCommitTransaction: {
Errors: []error{
status.Error(codes.Aborted, "Transaction aborted"),
status.Error(codes.Unavailable, "Unavailable"),
},
},
}, 2); err != nil {
t.Fatal(err)
}
}
func TestClient_ReadWriteTransactionCommitAlreadyExists(t *testing.T) {
t.Parallel()
if err := testReadWriteTransaction(t, map[string]SimulatedExecutionTime{
MethodCommitTransaction: {Errors: []error{status.Error(codes.AlreadyExists, "A row with this key already exists")}},
}, 1); err != nil {
if status.Code(err) != codes.AlreadyExists {
t.Fatalf("Got unexpected error %v, expected %v", err, codes.AlreadyExists)
}
} else {
t.Fatalf("Missing expected exception")
}
}
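// testReadWriteTransaction runs the singers query in a read/write transaction
// against a mock server that simulates the given execution times, and fails
// the test if the number of transaction attempts does not match
// expectedAttempts.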
func testReadWriteTransaction(t *testing.T, executionTimes map[string]SimulatedExecutionTime, expectedAttempts int) error {
return testReadWriteTransactionWithConfig(t, ClientConfig{SessionPoolConfig: DefaultSessionPoolConfig}, executionTimes, expectedAttempts)
}
func testReadWriteTransactionWithConfig(t *testing.T, config ClientConfig, executionTimes map[string]SimulatedExecutionTime, expectedAttempts int) error {
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for method, exec := range executionTimes {
server.TestSpanner.PutExecutionTime(method, exec)
}
ctx := context.Background()
var attempts int
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
rowCount := int64(0)
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
return err
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
return status.Errorf(codes.FailedPrecondition, "Row count mismatch, got %v, expected %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
return nil
})
if err != nil {
return err
}
if expectedAttempts != attempts {
t.Fatalf("unexpected number of attempts: %d, expected %d", attempts, expectedAttempts)
}
return nil
}
func TestClient_ApplyAtLeastOnce(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
ms := []*Mutation{
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}),
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}),
}
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Transaction aborted")},
})
_, err := client.Apply(context.Background(), ms, ApplyAtLeastOnce())
if err != nil {
t.Fatal(err)
}
}
func TestClient_ApplyAtLeastOnceReuseSession(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 0,
WriteSessions: 0.0,
TrackSessionHandles: true,
},
})
defer teardown()
sp := client.idleSessions
ms := []*Mutation{
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}),
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}),
}
for i := 0; i < 10; i++ {
_, err := client.Apply(context.Background(), ms, ApplyAtLeastOnce())
if err != nil {
t.Fatal(err)
}
sp.mu.Lock()
if g, w := uint64(sp.idleList.Len())+sp.createReqs, sp.incStep; g != w {
t.Fatalf("idle session count mismatch:\nGot: %v\nWant: %v", g, w)
}
if g, w := uint64(len(server.TestSpanner.DumpSessions())), sp.incStep; g != w {
t.Fatalf("server session count mismatch:\nGot: %v\nWant: %v", g, w)
}
sp.mu.Unlock()
}
// There should be no sessions marked as checked out.
sp.mu.Lock()
g, w := sp.trackedSessionHandles.Len(), 0
sp.mu.Unlock()
if g != w {
t.Fatalf("checked out sessions count mismatch:\nGot: %v\nWant: %v", g, w)
}
}
func TestClient_ApplyAtLeastOnceInvalidArgument(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 0,
WriteSessions: 0.0,
TrackSessionHandles: true,
},
})
defer teardown()
sp := client.idleSessions
ms := []*Mutation{
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}),
Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}),
}
for i := 0; i < 10; i++ {
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
Errors: []error{status.Error(codes.InvalidArgument, "Invalid data")},
})
_, err := client.Apply(context.Background(), ms, ApplyAtLeastOnce())
if status.Code(err) != codes.InvalidArgument {
t.Fatal(err)
}
sp.mu.Lock()
if g, w := uint64(sp.idleList.Len())+sp.createReqs, sp.incStep; g != w {
t.Fatalf("idle session count mismatch:\nGot: %v\nWant: %v", g, w)
}
if g, w := uint64(len(server.TestSpanner.DumpSessions())), sp.incStep; g != w {
t.Fatalf("server session count mismatch:\nGot: %v\nWant: %v", g, w)
}
sp.mu.Unlock()
}
// There should be no sessions marked as checked out.
client.idleSessions.mu.Lock()
g, w := client.idleSessions.trackedSessionHandles.Len(), 0
client.idleSessions.mu.Unlock()
if g != w {
t.Fatalf("checked out sessions count mismatch:\nGot: %v\nWant: %v", g, w)
}
}
func TestReadWriteTransaction_ErrUnexpectedEOF(t *testing.T) {
t.Parallel()
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
var attempts int
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
attempts++
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
var singerID, albumID int64
var albumTitle string
if err := row.Columns(&singerID, &albumID, &albumTitle); err != nil {
return err
}
}
return io.ErrUnexpectedEOF
})
if err != io.ErrUnexpectedEOF {
t.Fatalf("Missing expected error %v, got %v", io.ErrUnexpectedEOF, err)
}
if attempts != 1 {
t.Fatalf("unexpected number of attempts: %d, expected %d", attempts, 1)
}
}
func TestReadWriteTransaction_WrapError(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
// Abort the transaction on both the query as well as commit.
// The first abort error will be wrapped. The client will unwrap the cause
// of the error and retry the transaction. The aborted error on commit
// will not be wrapped, but will also be recognized by the client as an
// abort that should be retried.
server.TestSpanner.PutExecutionTime(MethodExecuteStreamingSql,
SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Transaction aborted")},
})
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
Errors: []error{status.Error(codes.Aborted, "Transaction aborted")},
})
msg := "query failed"
numAttempts := 0
ctx := context.Background()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
numAttempts++
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
// Wrap the error in another error that implements the
// (xerrors|errors).Wrapper interface.
return &wrappedTestError{err, msg}
}
}
return nil
})
if err != nil {
t.Fatalf("Unexpected error\nGot: %v\nWant: nil", err)
}
if g, w := numAttempts, 3; g != w {
t.Fatalf("Number of transaction attempts mismatch\nGot: %d\nWant: %d", w, w)
}
// Execute a transaction that returns a non-retryable error that is
// wrapped in a custom error. The transaction should return the custom
// error.
server.TestSpanner.PutExecutionTime(MethodExecuteStreamingSql,
SimulatedExecutionTime{
Errors: []error{status.Error(codes.NotFound, "Table not found")},
})
_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
numAttempts++
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
// Wrap the error in another error that implements the
// (xerrors|errors).Wrapper interface.
return &wrappedTestError{err, msg}
}
}
return nil
})
if err == nil || err.Error() != msg {
t.Fatalf("Unexpected error\nGot: %v\nWant: %v", err, msg)
}
}
func TestReadWriteTransaction_WrapSessionNotFoundError(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
server.TestSpanner.PutExecutionTime(MethodBeginTransaction,
SimulatedExecutionTime{
Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")},
})
server.TestSpanner.PutExecutionTime(MethodExecuteStreamingSql,
SimulatedExecutionTime{
Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")},
})
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
Errors: []error{newSessionNotFoundError("projects/p/instances/i/databases/d/sessions/s")},
})
msg := "query failed"
numAttempts := 0
ctx := context.Background()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
numAttempts++
iter := tx.Query(ctx, NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
defer iter.Stop()
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
// Wrap the error in another error that implements the
// (xerrors|errors).Wrapper interface.
return &wrappedTestError{err, msg}
}
}
return nil
})
if err != nil {
t.Fatalf("Unexpected error\nGot: %v\nWant: nil", err)
}
// We want 3 attempts. The 'Session not found' error on BeginTransaction
// will not retry the entire transaction, which means that we will have two
// failed attempts and then a successful attempt.
if g, w := numAttempts, 3; g != w {
t.Fatalf("Number of transaction attempts mismatch\nGot: %d\nWant: %d", g, w)
}
}
func TestClient_WriteStructWithPointers(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
type T struct {
ID int64
Col1 *string
Col2 []*string
Col3 *bool
Col4 []*bool
Col5 *int64
Col6 []*int64
Col7 *float64
Col8 []*float64
Col9 *time.Time
Col10 []*time.Time
Col11 *civil.Date
Col12 []*civil.Date
}
t1 := T{
ID: 1,
Col2: []*string{nil},
Col4: []*bool{nil},
Col6: []*int64{nil},
Col8: []*float64{nil},
Col10: []*time.Time{nil},
Col12: []*civil.Date{nil},
}
s := "foo"
b := true
i := int64(100)
f := 3.14
tm := time.Now()
d := civil.DateOf(time.Now())
t2 := T{
ID: 2,
Col1: &s,
Col2: []*string{&s},
Col3: &b,
Col4: []*bool{&b},
Col5: &i,
Col6: []*int64{&i},
Col7: &f,
Col8: []*float64{&f},
Col9: &tm,
Col10: []*time.Time{&tm},
Col11: &d,
Col12: []*civil.Date{&d},
}
m1, err := InsertStruct("Tab", &t1)
if err != nil {
t.Fatal(err)
}
m2, err := InsertStruct("Tab", &t2)
if err != nil {
t.Fatal(err)
}
_, err = client.Apply(context.Background(), []*Mutation{m1, m2})
if err != nil {
t.Fatal(err)
}
requests := drainRequestsFromServer(server.TestSpanner)
for _, req := range requests {
if commit, ok := req.(*sppb.CommitRequest); ok {
if g, w := len(commit.Mutations), 2; w != g {
t.Fatalf("mutation count mismatch\nGot: %v\nWant: %v", g, w)
}
insert := commit.Mutations[0].GetInsert()
// The first insert should contain NULL values and arrays
// containing exactly one NULL element.
for i := 1; i < len(insert.Values[0].Values); i += 2 {
// The non-array columns should contain NULL values.
g, w := insert.Values[0].Values[i].GetKind(), &structpb.Value_NullValue{}
if _, ok := g.(*structpb.Value_NullValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, w)
}
// The array columns should not be NULL.
g, wList := insert.Values[0].Values[i+1].GetKind(), &structpb.Value_ListValue{}
if _, ok := g.(*structpb.Value_ListValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, wList)
}
// The array should contain 1 NULL value.
if gLength, wLength := len(insert.Values[0].Values[i+1].GetListValue().Values), 1; gLength != wLength {
t.Fatalf("list value length mismatch\nGot: %v\nWant: %v", gLength, wLength)
}
g, w = insert.Values[0].Values[i+1].GetListValue().Values[0].GetKind(), &structpb.Value_NullValue{}
if _, ok := g.(*structpb.Value_NullValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, w)
}
}
// The second insert should contain all non-NULL values.
insert = commit.Mutations[1].GetInsert()
for i := 1; i < len(insert.Values[0].Values); i += 2 {
// The non-array columns should contain non-NULL values.
g := insert.Values[0].Values[i].GetKind()
if _, ok := g.(*structpb.Value_NullValue); ok {
t.Fatalf("type mismatch\nGot: %v\nWant: non-NULL value", g)
}
// The array columns should also be non-NULL.
g, wList := insert.Values[0].Values[i+1].GetKind(), &structpb.Value_ListValue{}
if _, ok := g.(*structpb.Value_ListValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, wList)
}
// The array should contain exactly 1 non-NULL value.
if gLength, wLength := len(insert.Values[0].Values[i+1].GetListValue().Values), 1; gLength != wLength {
t.Fatalf("list value length mismatch\nGot: %v\nWant: %v", gLength, wLength)
}
g = insert.Values[0].Values[i+1].GetListValue().Values[0].GetKind()
if _, ok := g.(*structpb.Value_NullValue); ok {
t.Fatalf("type mismatch\nGot: %v\nWant: non-NULL value", g)
}
}
}
}
}
func TestClient_WriteStructWithCustomTypes(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
type CustomString string
type CustomBool bool
type CustomInt64 int64
type CustomFloat64 float64
type CustomTime time.Time
type CustomDate civil.Date
type T struct {
ID int64
Col1 CustomString
Col2 []CustomString
Col3 CustomBool
Col4 []CustomBool
Col5 CustomInt64
Col6 []CustomInt64
Col7 CustomFloat64
Col8 []CustomFloat64
Col9 CustomTime
Col10 []CustomTime
Col11 CustomDate
Col12 []CustomDate
}
t1 := T{
ID: 1,
Col2: []CustomString{},
Col4: []CustomBool{},
Col6: []CustomInt64{},
Col8: []CustomFloat64{},
Col10: []CustomTime{},
Col12: []CustomDate{},
}
t2 := T{
ID: 2,
Col1: "foo",
Col2: []CustomString{"foo"},
Col3: true,
Col4: []CustomBool{true},
Col5: 100,
Col6: []CustomInt64{100},
Col7: 3.14,
Col8: []CustomFloat64{3.14},
Col9: CustomTime(time.Now()),
Col10: []CustomTime{CustomTime(time.Now())},
Col11: CustomDate(civil.DateOf(time.Now())),
Col12: []CustomDate{CustomDate(civil.DateOf(time.Now()))},
}
m1, err := InsertStruct("Tab", &t1)
if err != nil {
t.Fatal(err)
}
m2, err := InsertStruct("Tab", &t2)
if err != nil {
t.Fatal(err)
}
_, err = client.Apply(context.Background(), []*Mutation{m1, m2})
if err != nil {
t.Fatal(err)
}
requests := drainRequestsFromServer(server.TestSpanner)
for _, req := range requests {
if commit, ok := req.(*sppb.CommitRequest); ok {
if g, w := len(commit.Mutations), 2; w != g {
t.Fatalf("mutation count mismatch\nGot: %v\nWant: %v", g, w)
}
insert1 := commit.Mutations[0].GetInsert()
row1 := insert1.Values[0]
// The first insert should contain empty values and empty arrays
for i := 1; i < len(row1.Values); i += 2 {
// The non-array columns should contain empty values.
g := row1.Values[i].GetKind()
if _, ok := g.(*structpb.Value_NullValue); ok {
t.Fatalf("type mismatch\nGot: %v\nWant: non-NULL value", g)
}
// The array columns should not be NULL.
g, wList := row1.Values[i+1].GetKind(), &structpb.Value_ListValue{}
if _, ok := g.(*structpb.Value_ListValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, wList)
}
}
// The second insert should contain all non-NULL values.
insert2 := commit.Mutations[1].GetInsert()
row2 := insert2.Values[0]
for i := 1; i < len(row2.Values); i += 2 {
// The non-array columns should contain non-NULL values.
g := row2.Values[i].GetKind()
if _, ok := g.(*structpb.Value_NullValue); ok {
t.Fatalf("type mismatch\nGot: %v\nWant: non-NULL value", g)
}
// The array columns should also be non-NULL.
g, wList := row2.Values[i+1].GetKind(), &structpb.Value_ListValue{}
if _, ok := g.(*structpb.Value_ListValue); !ok {
t.Fatalf("type mismatch\nGot: %v\nWant: %v", g, wList)
}
// The array should contain exactly 1 non-NULL value.
if gLength, wLength := len(row2.Values[i+1].GetListValue().Values), 1; gLength != wLength {
t.Fatalf("list value length mismatch\nGot: %v\nWant: %v", gLength, wLength)
}
g = row2.Values[i+1].GetListValue().Values[0].GetKind()
if _, ok := g.(*structpb.Value_NullValue); ok {
t.Fatalf("type mismatch\nGot: %v\nWant: non-NULL value", g)
}
}
}
}
}
func TestReadWriteTransaction_ContextTimeoutDuringCommit(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 1,
WriteSessions: 0,
},
})
defer teardown()
// Wait until session creation has ceased so that the
// context timeout won't happen while a session is being created.
waitFor(t, func() error {
sp := client.idleSessions
sp.mu.Lock()
defer sp.mu.Unlock()
if sp.createReqs != 0 {
return fmt.Errorf("%d sessions are still in creation", sp.createReqs)
}
return nil
})
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
MinimumExecutionTime: time.Minute,
})
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
tx.BufferWrite([]*Mutation{Insert("FOO", []string{"ID", "NAME"}, []interface{}{int64(1), "bar"})})
return nil
})
errContext, _ := context.WithTimeout(context.Background(), -time.Second)
w := toSpannerErrorWithCommitInfo(errContext.Err(), true).(*Error)
var se *Error
if !errorAs(err, &se) {
t.Fatalf("Error mismatch\nGot: %v\nWant: %v", err, w)
}
if se.GRPCStatus().Code() != w.GRPCStatus().Code() {
t.Fatalf("Error status mismatch:\nGot: %v\nWant: %v", se.GRPCStatus(), w.GRPCStatus())
}
if se.Error() != w.Error() {
t.Fatalf("Error message mismatch:\nGot %s\nWant: %s", se.Error(), w.Error())
}
var outcome *TransactionOutcomeUnknownError
if !errorAs(err, &outcome) {
t.Fatalf("Missing wrapped TransactionOutcomeUnknownError error")
}
}
func TestFailedCommit_NoRollback(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 0,
MaxOpened: 1,
WriteSessions: 0,
},
})
defer teardown()
server.TestSpanner.PutExecutionTime(MethodCommitTransaction,
SimulatedExecutionTime{
Errors: []error{status.Errorf(codes.InvalidArgument, "Invalid mutations")},
})
_, err := client.Apply(context.Background(), []*Mutation{
Insert("FOO", []string{"ID", "BAR"}, []interface{}{1, "value"}),
})
if got, want := status.Convert(err).Code(), codes.InvalidArgument; got != want {
t.Fatalf("Error mismatch\nGot: %v\nWant: %v", got, want)
}
// The failed commit should not trigger a rollback after the commit.
if _, err := shouldHaveReceived(server.TestSpanner, []interface{}{
&sppb.BatchCreateSessionsRequest{},
&sppb.BeginTransactionRequest{},
&sppb.CommitRequest{},
}); err != nil {
t.Fatalf("Received RPCs mismatch: %v", err)
}
}
func TestFailedUpdate_ShouldRollback(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: 0,
MaxOpened: 1,
WriteSessions: 0,
},
})
defer teardown()
server.TestSpanner.PutExecutionTime(MethodExecuteSql,
SimulatedExecutionTime{
Errors: []error{status.Errorf(codes.InvalidArgument, "Invalid update")},
})
_, err := client.ReadWriteTransaction(context.Background(), func(ctx context.Context, tx *ReadWriteTransaction) error {
_, err := tx.Update(ctx, NewStatement("UPDATE FOO SET BAR='value' WHERE ID=1"))
return err
})
if got, want := status.Convert(err).Code(), codes.InvalidArgument; got != want {
t.Fatalf("Error mismatch\nGot: %v\nWant: %v", got, want)
}
// The failed update should trigger a rollback.
if _, err := shouldHaveReceived(server.TestSpanner, []interface{}{
&sppb.BatchCreateSessionsRequest{},
&sppb.BeginTransactionRequest{},
&sppb.ExecuteSqlRequest{},
&sppb.RollbackRequest{},
}); err != nil {
t.Fatalf("Received RPCs mismatch: %v", err)
}
}
func TestClient_NumChannels(t *testing.T) {
t.Parallel()
configuredNumChannels := 8
_, client, teardown := setupMockedTestServerWithConfig(
t,
ClientConfig{NumChannels: configuredNumChannels},
)
defer teardown()
if g, w := client.sc.connPool.Num(), configuredNumChannels; g != w {
t.Fatalf("NumChannels mismatch\nGot: %v\nWant: %v", g, w)
}
}
func TestClient_WithGRPCConnectionPool(t *testing.T) {
t.Parallel()
configuredConnPool := 8
_, client, teardown := setupMockedTestServerWithConfigAndClientOptions(
t,
ClientConfig{},
[]option.ClientOption{option.WithGRPCConnectionPool(configuredConnPool)},
)
defer teardown()
if g, w := client.sc.connPool.Num(), configuredConnPool; g != w {
t.Fatalf("NumChannels mismatch\nGot: %v\nWant: %v", g, w)
}
}
func TestClient_WithGRPCConnectionPoolAndNumChannels(t *testing.T) {
t.Parallel()
configuredNumChannels := 8
configuredConnPool := 8
_, client, teardown := setupMockedTestServerWithConfigAndClientOptions(
t,
ClientConfig{NumChannels: configuredNumChannels},
[]option.ClientOption{option.WithGRPCConnectionPool(configuredConnPool)},
)
defer teardown()
if g, w := client.sc.connPool.Num(), configuredConnPool; g != w {
t.Fatalf("NumChannels mismatch\nGot: %v\nWant: %v", g, w)
}
}
func TestClient_WithGRPCConnectionPoolAndNumChannels_Misconfigured(t *testing.T) {
t.Parallel()
// Deliberately misconfigure NumChannels and ConnPool.
configuredNumChannels := 8
configuredConnPool := 16
_, opts, serverTeardown := NewMockedSpannerInMemTestServer(t)
defer serverTeardown()
opts = append(opts, option.WithGRPCConnectionPool(configuredConnPool))
_, err := NewClientWithConfig(context.Background(), "projects/p/instances/i/databases/d", ClientConfig{NumChannels: configuredNumChannels}, opts...)
msg := "Connection pool mismatch:"
if err == nil {
t.Fatalf("Error mismatch\nGot: nil\nWant: %s", msg)
}
var se *Error
if ok := errorAs(err, &se); !ok {
t.Fatalf("Error mismatch\nGot: %v\nWant: An instance of a Spanner error", err)
}
if g, w := se.GRPCStatus().Code(), codes.InvalidArgument; g != w {
t.Fatalf("Error code mismatch\nGot: %v\nWant: %v", g, w)
}
if !strings.Contains(se.Error(), msg) {
t.Fatalf("Error message mismatch\nGot: %s\nWant: %s", se.Error(), msg)
}
}
func TestClient_CallOptions(t *testing.T) {
t.Parallel()
co := &vkit.CallOptions{
CreateSession: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable, codes.DeadlineExceeded,
}, gax.Backoff{
Initial: 200 * time.Millisecond,
Max: 30000 * time.Millisecond,
Multiplier: 1.25,
})
}),
},
}
_, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{CallOptions: co})
defer teardown()
c, err := client.sc.nextClient()
if err != nil {
t.Fatalf("failed to get a session client: %v", err)
}
cs := &gax.CallSettings{}
// This is the default retry setting.
c.CallOptions.CreateSession[0].Resolve(cs)
if got, want := fmt.Sprintf("%v", cs.Retry()), "&{{250000000 32000000000 1.3 0} [14]}"; got != want {
t.Fatalf("merged CallOptions is incorrect: got %v, want %v", got, want)
}
// This is the custom retry setting.
c.CallOptions.CreateSession[1].Resolve(cs)
if got, want := fmt.Sprintf("%v", cs.Retry()), "&{{200000000 30000000000 1.25 0} [14 4]}"; got != want {
t.Fatalf("merged CallOptions is incorrect: got %v, want %v", got, want)
}
}
func TestClient_QueryWithCallOptions(t *testing.T) {
t.Parallel()
co := &vkit.CallOptions{
ExecuteSql: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
}, gax.Backoff{
Initial: 200 * time.Millisecond,
Max: 30000 * time.Millisecond,
Multiplier: 1.25,
})
}),
},
}
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{CallOptions: co})
server.TestSpanner.PutExecutionTime(MethodExecuteSql, SimulatedExecutionTime{
Errors: []error{status.Error(codes.DeadlineExceeded, "Deadline exceeded")},
})
defer teardown()
ctx := context.Background()
_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
_, err := tx.Update(ctx, Statement{SQL: UpdateBarSetFoo})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
func TestClient_ShouldReceiveMetadataForEmptyResultSet(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
// This creates an empty result set.
res := server.CreateSingleRowSingersResult(SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
sql := "SELECT SingerId, AlbumId, AlbumTitle FROM Albums WHERE 1=2"
server.TestSpanner.PutStatementResult(sql, res)
defer teardown()
ctx := context.Background()
iter := client.Single().Query(ctx, NewStatement(sql))
defer iter.Stop()
row, err := iter.Next()
if err != iterator.Done {
t.Errorf("Query result mismatch:\nGot: %v\nWant: <no rows>", row)
}
metadata := iter.Metadata
if metadata == nil {
t.Fatalf("Missing ResultSet Metadata")
}
if metadata.RowType == nil {
t.Fatalf("Missing ResultSet RowType")
}
if metadata.RowType.Fields == nil {
t.Fatalf("Missing ResultSet Fields")
}
if g, w := len(metadata.RowType.Fields), 3; g != w {
t.Fatalf("Field count mismatch\nGot: %v\nWant: %v", g, w)
}
wantFieldNames := []string{"SingerId", "AlbumId", "AlbumTitle"}
for i, w := range wantFieldNames {
g := metadata.RowType.Fields[i].Name
if g != w {
t.Fatalf("Field[%v] name mismatch\nGot: %v\nWant: %v", i, g, w)
}
}
wantFieldTypes := []sppb.TypeCode{sppb.TypeCode_INT64, sppb.TypeCode_INT64, sppb.TypeCode_STRING}
for i, w := range wantFieldTypes {
g := metadata.RowType.Fields[i].Type.Code
if g != w {
t.Fatalf("Field[%v] type mismatch\nGot: %v\nWant: %v", i, g, w)
}
}
}
func TestClient_EncodeCustomFieldType(t *testing.T) {
t.Parallel()
type typesTable struct {
Int customStructToInt `spanner:"Int"`
String customStructToString `spanner:"String"`
Float customStructToFloat `spanner:"Float"`
Bool customStructToBool `spanner:"Bool"`
Time customStructToTime `spanner:"Time"`
Date customStructToDate `spanner:"Date"`
}
server, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
d := typesTable{
Int: customStructToInt{1, 23},
String: customStructToString{"A", "B"},
Float: customStructToFloat{1.23, 12.3},
Bool: customStructToBool{true, false},
Time: customStructToTime{"A", "B"},
Date: customStructToDate{"A", "B"},
}
m, err := InsertStruct("Types", &d)
if err != nil {
t.Fatalf("err: %v", err)
}
ms := []*Mutation{m}
_, err = client.Apply(ctx, ms)
if err != nil {
t.Fatalf("err: %v", err)
}
reqs := drainRequestsFromServer(server.TestSpanner)
for _, req := range reqs {
if commitReq, ok := req.(*sppb.CommitRequest); ok {
val := commitReq.Mutations[0].GetInsert().Values[0]
if got, want := val.Values[0].GetStringValue(), "123"; got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[0].GetKind(), want)
}
if got, want := val.Values[1].GetStringValue(), "A-B"; got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[1].GetKind(), want)
}
if got, want := val.Values[2].GetNumberValue(), float64(123.123); got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[2].GetKind(), want)
}
if got, want := val.Values[3].GetBoolValue(), true; got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[3].GetKind(), want)
}
if got, want := val.Values[4].GetStringValue(), "2016-11-15T15:04:05.999999999Z"; got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[4].GetKind(), want)
}
if got, want := val.Values[5].GetStringValue(), "2016-11-15"; got != want {
t.Fatalf("value mismatch: got %v (kind %T), want %v", got, val.Values[5].GetKind(), want)
}
}
}
}
func setupDecodeCustomFieldResult(server *MockedSpannerInMemTestServer, stmt string) error {
metadata := &sppb.ResultSetMetadata{
RowType: &sppb.StructType{
Fields: []*sppb.StructType_Field{
{Name: "Int", Type: &sppb.Type{Code: sppb.TypeCode_INT64}},
{Name: "String", Type: &sppb.Type{Code: sppb.TypeCode_STRING}},
{Name: "Float", Type: &sppb.Type{Code: sppb.TypeCode_FLOAT64}},
{Name: "Bool", Type: &sppb.Type{Code: sppb.TypeCode_BOOL}},
{Name: "Time", Type: &sppb.Type{Code: sppb.TypeCode_TIMESTAMP}},
{Name: "Date", Type: &sppb.Type{Code: sppb.TypeCode_DATE}},
},
},
}
rowValues := []*structpb.Value{
{Kind: &structpb.Value_StringValue{StringValue: "123"}},
{Kind: &structpb.Value_StringValue{StringValue: "A-B"}},
{Kind: &structpb.Value_NumberValue{NumberValue: float64(123.123)}},
{Kind: &structpb.Value_BoolValue{BoolValue: true}},
{Kind: &structpb.Value_StringValue{StringValue: "2016-11-15T15:04:05.999999999Z"}},
{Kind: &structpb.Value_StringValue{StringValue: "2016-11-15"}},
}
rows := []*structpb.ListValue{
{Values: rowValues},
}
resultSet := &sppb.ResultSet{
Metadata: metadata,
Rows: rows,
}
result := &StatementResult{
Type: StatementResultResultSet,
ResultSet: resultSet,
}
return server.TestSpanner.PutStatementResult(stmt, result)
}
func TestClient_DecodeCustomFieldType(t *testing.T) {
t.Parallel()
type typesTable struct {
Int customStructToInt `spanner:"Int"`
String customStructToString `spanner:"String"`
Float customStructToFloat `spanner:"Float"`
Bool customStructToBool `spanner:"Bool"`
Time customStructToTime `spanner:"Time"`
Date customStructToDate `spanner:"Date"`
}
server, client, teardown := setupMockedTestServer(t)
defer teardown()
query := "SELECT * FROM Types"
	if err := setupDecodeCustomFieldResult(server, query); err != nil {
		t.Fatalf("failed to set up the statement result: %v", err)
	}
ctx := context.Background()
stmt := Statement{SQL: query}
iter := client.Single().Query(ctx, stmt)
defer iter.Stop()
var results []typesTable
var lenientResults []typesTable
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatalf("failed to get next: %v", err)
}
var d typesTable
if err := row.ToStruct(&d); err != nil {
t.Fatalf("failed to convert a row to a struct: %v", err)
}
results = append(results, d)
var d2 typesTable
if err := row.ToStructLenient(&d2); err != nil {
t.Fatalf("failed to convert a row to a struct: %v", err)
}
lenientResults = append(lenientResults, d2)
}
	if len(results) != 1 || len(lenientResults) != 1 {
		t.Fatalf("mismatch length of array: got %v, want 1", len(results))
	}
want := typesTable{
Int: customStructToInt{1, 23},
String: customStructToString{"A", "B"},
Float: customStructToFloat{1.23, 12.3},
Bool: customStructToBool{true, false},
Time: customStructToTime{"A", "B"},
Date: customStructToDate{"A", "B"},
}
got := results[0]
if !testEqual(got, want) {
t.Fatalf("mismatch result from ToStruct: got %v, want %v", got, want)
}
got = lenientResults[0]
if !testEqual(got, want) {
t.Fatalf("mismatch result from ToStructLenient: got %v, want %v", got, want)
}
}
func TestClient_EmulatorWithCredentialsFile(t *testing.T) {
old := os.Getenv("SPANNER_EMULATOR_HOST")
defer os.Setenv("SPANNER_EMULATOR_HOST", old)
os.Setenv("SPANNER_EMULATOR_HOST", "localhost:1234")
client, err := NewClientWithConfig(
context.Background(),
"projects/p/instances/i/databases/d",
ClientConfig{},
option.WithCredentialsFile("/path/to/key.json"),
)
	if err != nil {
		t.Fatalf("Failed to create a client with credentials file when running against an emulator: %v", err)
	}
	defer client.Close()
}
func TestBatchReadOnlyTransaction_QueryOptions(t *testing.T) {
ctx := context.Background()
qo := QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{
OptimizerVersion: "1",
OptimizerStatisticsPackage: "latest",
}}
_, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: qo})
defer teardown()
txn, err := client.BatchReadOnlyTransaction(ctx, StrongRead())
if err != nil {
t.Fatal(err)
}
defer txn.Cleanup(ctx)
if txn.qo != qo {
t.Fatalf("Query options are mismatched: got %v, want %v", txn.qo, qo)
}
}
func TestBatchReadOnlyTransactionFromID_QueryOptions(t *testing.T) {
qo := QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{
OptimizerVersion: "1",
OptimizerStatisticsPackage: "latest",
}}
_, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{QueryOptions: qo})
defer teardown()
txn := client.BatchReadOnlyTransactionFromID(BatchReadOnlyTransactionID{})
if txn.qo != qo {
t.Fatalf("Query options are mismatched: got %v, want %v", txn.qo, qo)
}
}
type QueryOptionsTestCase struct {
name string
client QueryOptions
env QueryOptions
query QueryOptions
want QueryOptions
}
func queryOptionsTestCases() []QueryOptionsTestCase {
statsPkg := "latest"
return []QueryOptionsTestCase{
{
"Client level",
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: nil},
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
},
{
"Environment level",
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
},
{
"Query level",
QueryOptions{Options: nil},
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
},
{
"Environment level has precedence",
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "2", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "2", OptimizerStatisticsPackage: statsPkg}},
},
{
"Query level has precedence than client level",
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: nil},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "3", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "3", OptimizerStatisticsPackage: statsPkg}},
},
{
"Query level has highest precedence",
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "1", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "2", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "3", OptimizerStatisticsPackage: statsPkg}},
QueryOptions{Options: &sppb.ExecuteSqlRequest_QueryOptions{OptimizerVersion: "3", OptimizerStatisticsPackage: statsPkg}},
},
}
}
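// Hedged illustration (not part of the original test file): the cases above encode a
// precedence order of client < environment < query for the ExecuteSqlRequest_QueryOptions.
// A minimal sketch of that merge, using a hypothetical helper name:
//
//	func effectiveQueryOptions(client, env, query QueryOptions) QueryOptions {
//		merged := client
//		if env.Options != nil {
//			merged = env
//		}
//		if query.Options != nil {
//			merged = query
//		}
//		return merged
//	}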
func TestClient_DoForEachRow_ShouldNotEndSpanWithIteratorDoneError(t *testing.T) {
// This test cannot be parallel, as the TestExporter does not support that.
te := itestutil.NewTestExporter()
defer te.Unregister()
minOpened := uint64(1)
_, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: minOpened,
WriteSessions: 0,
},
})
defer teardown()
// Wait until all sessions have been created, so we know that those requests will not interfere with the test.
sp := client.idleSessions
waitFor(t, func() error {
sp.mu.Lock()
defer sp.mu.Unlock()
if uint64(sp.idleList.Len()) != minOpened {
return fmt.Errorf("num open sessions mismatch\nWant: %d\nGot: %d", sp.MinOpened, sp.numOpened)
}
return nil
})
iter := client.Single().Query(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums))
iter.Do(func(r *Row) error {
return nil
})
select {
case <-te.Stats:
case <-time.After(1 * time.Second):
t.Fatal("No stats were exported before timeout")
}
// Preferably we would want to lock the TestExporter here, but the mutex TestExporter.mu is not exported, so we
// cannot do that.
if len(te.Spans) == 0 {
t.Fatal("No spans were exported")
}
s := te.Spans[len(te.Spans)-1].Status
if s.Code != int32(codes.OK) {
t.Errorf("Span status mismatch\nGot: %v\nWant: %v", s.Code, codes.OK)
}
}
func TestClient_DoForEachRow_ShouldEndSpanWithQueryError(t *testing.T) {
// This test cannot be parallel, as the TestExporter does not support that.
te := itestutil.NewTestExporter()
defer te.Unregister()
minOpened := uint64(1)
server, client, teardown := setupMockedTestServerWithConfig(t, ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: minOpened,
WriteSessions: 0,
},
})
defer teardown()
// Wait until all sessions have been created, so we know that those requests will not interfere with the test.
sp := client.idleSessions
waitFor(t, func() error {
sp.mu.Lock()
defer sp.mu.Unlock()
if uint64(sp.idleList.Len()) != minOpened {
return fmt.Errorf("num open sessions mismatch\nWant: %d\nGot: %d", sp.MinOpened, sp.numOpened)
}
return nil
})
sql := "SELECT * FROM"
server.TestSpanner.PutStatementResult(sql, &StatementResult{
Type: StatementResultError,
Err: status.Error(codes.InvalidArgument, "Invalid query"),
})
iter := client.Single().Query(context.Background(), NewStatement(sql))
iter.Do(func(r *Row) error {
return nil
})
select {
case <-te.Stats:
case <-time.After(1 * time.Second):
t.Fatal("No stats were exported before timeout")
}
// Preferably we would want to lock the TestExporter here, but the mutex TestExporter.mu is not exported, so we
// cannot do that.
if len(te.Spans) == 0 {
t.Fatal("No spans were exported")
}
s := te.Spans[len(te.Spans)-1].Status
if s.Code != int32(codes.InvalidArgument) {
t.Errorf("Span status mismatch\nGot: %v\nWant: %v", s.Code, codes.InvalidArgument)
}
}
func TestClient_ReadOnlyTransaction_Priority(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, qo := range []QueryOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_HIGH},
} {
for _, tx := range []*ReadOnlyTransaction{
client.Single(),
client.ReadOnlyTransaction(),
} {
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
if tx.singleUse {
tx = client.Single()
}
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{Priority: qo.Priority})
iter.Next()
iter.Stop()
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 2, sppb.RequestOptions{Priority: qo.Priority})
tx.Close()
}
}
}
func TestClient_ReadWriteTransaction_Priority(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, to := range []TransactionOptions{
{},
{CommitPriority: sppb.RequestOptions_PRIORITY_MEDIUM},
} {
for _, qo := range []QueryOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_MEDIUM},
} {
client.ReadWriteTransactionWithOptions(context.Background(), func(ctx context.Context, tx *ReadWriteTransaction) error {
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{Priority: qo.Priority})
iter.Next()
iter.Stop()
tx.UpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
tx.BatchUpdateWithOptions(context.Background(), []Statement{
NewStatement(UpdateBarSetFoo),
}, qo)
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 4, sppb.RequestOptions{Priority: qo.Priority})
return nil
}, to)
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{Priority: to.CommitPriority})
}
}
}
func TestClient_StmtBasedReadWriteTransaction_Priority(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, to := range []TransactionOptions{
{},
{CommitPriority: sppb.RequestOptions_PRIORITY_LOW},
} {
for _, qo := range []QueryOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_LOW},
} {
tx, _ := NewReadWriteStmtBasedTransactionWithOptions(context.Background(), client, to)
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{Priority: qo.Priority})
iter.Next()
iter.Stop()
tx.UpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
tx.BatchUpdateWithOptions(context.Background(), []Statement{
NewStatement(UpdateBarSetFoo),
}, qo)
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 4, sppb.RequestOptions{Priority: qo.Priority})
tx.Commit(context.Background())
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{Priority: to.CommitPriority})
}
}
}
func TestClient_PDML_Priority(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, qo := range []QueryOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_HIGH},
} {
client.PartitionedUpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 1, sppb.RequestOptions{Priority: qo.Priority})
}
}
func TestClient_Apply_Priority(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})})
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, Priority(sppb.RequestOptions_PRIORITY_HIGH))
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{Priority: sppb.RequestOptions_PRIORITY_HIGH})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, ApplyAtLeastOnce())
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, ApplyAtLeastOnce(), Priority(sppb.RequestOptions_PRIORITY_MEDIUM))
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{Priority: sppb.RequestOptions_PRIORITY_MEDIUM})
}
func TestClient_ReadOnlyTransaction_Tag(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, qo := range []QueryOptions{
{},
{RequestTag: "tag-1"},
} {
for _, tx := range []*ReadOnlyTransaction{
client.Single(),
client.ReadOnlyTransaction(),
} {
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
if tx.singleUse {
tx = client.Single()
}
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{RequestTag: qo.RequestTag})
iter.Next()
iter.Stop()
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 2, sppb.RequestOptions{RequestTag: qo.RequestTag})
tx.Close()
}
}
}
func TestClient_ReadWriteTransaction_Tag(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, to := range []TransactionOptions{
{},
{TransactionTag: "tx-tag-1"},
} {
for _, qo := range []QueryOptions{
{},
{RequestTag: "request-tag-1"},
} {
client.ReadWriteTransactionWithOptions(context.Background(), func(ctx context.Context, tx *ReadWriteTransaction) error {
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{RequestTag: qo.RequestTag})
iter.Next()
iter.Stop()
tx.UpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
tx.BatchUpdateWithOptions(context.Background(), []Statement{
NewStatement(UpdateBarSetFoo),
}, qo)
				// Check for SQL requests inside the transaction to prevent the check from
				// draining the commit request from the server.
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 4, sppb.RequestOptions{RequestTag: qo.RequestTag, TransactionTag: to.TransactionTag})
return nil
}, to)
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{TransactionTag: to.TransactionTag})
}
}
}
func TestClient_StmtBasedReadWriteTransaction_Tag(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, to := range []TransactionOptions{
{},
{TransactionTag: "tx-tag-1"},
} {
for _, qo := range []QueryOptions{
{},
{RequestTag: "request-tag-1"},
} {
tx, _ := NewReadWriteStmtBasedTransactionWithOptions(context.Background(), client, to)
iter := tx.QueryWithOptions(context.Background(), NewStatement(SelectSingerIDAlbumIDAlbumTitleFromAlbums), qo)
iter.Next()
iter.Stop()
iter = tx.ReadWithOptions(context.Background(), "FOO", AllKeys(), []string{"BAR"}, &ReadOptions{RequestTag: qo.RequestTag})
iter.Next()
iter.Stop()
tx.UpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
tx.BatchUpdateWithOptions(context.Background(), []Statement{
NewStatement(UpdateBarSetFoo),
}, qo)
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 4, sppb.RequestOptions{RequestTag: qo.RequestTag, TransactionTag: to.TransactionTag})
tx.Commit(context.Background())
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{TransactionTag: to.TransactionTag})
}
}
}
func TestClient_PDML_Tag(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, qo := range []QueryOptions{
{},
{RequestTag: "request-tag-1"},
} {
client.PartitionedUpdateWithOptions(context.Background(), NewStatement(UpdateBarSetFoo), qo)
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, 1, sppb.RequestOptions{RequestTag: qo.RequestTag})
}
}
func TestClient_Apply_Tagging(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})})
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, TransactionTag("tx-tag"))
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{TransactionTag: "tx-tag"})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, ApplyAtLeastOnce())
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{})
client.Apply(context.Background(), []*Mutation{Insert("foo", []string{"col1"}, []interface{}{"val1"})}, ApplyAtLeastOnce(), TransactionTag("tx-tag"))
checkCommitForExpectedRequestOptions(t, server.TestSpanner, sppb.RequestOptions{TransactionTag: "tx-tag"})
}
func TestClient_PartitionQuery_RequestOptions(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, qo := range []QueryOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_LOW},
{RequestTag: "batch-query-tag"},
{Priority: sppb.RequestOptions_PRIORITY_MEDIUM, RequestTag: "batch-query-with-medium-prio"},
} {
ctx := context.Background()
txn, _ := client.BatchReadOnlyTransaction(ctx, StrongRead())
partitions, _ := txn.PartitionQueryWithOptions(ctx, NewStatement(SelectFooFromBar), PartitionOptions{MaxPartitions: 10}, qo)
for _, p := range partitions {
iter := txn.Execute(ctx, p)
iter.Next()
iter.Stop()
}
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, len(partitions), sppb.RequestOptions{RequestTag: qo.RequestTag, Priority: qo.Priority})
}
}
func TestClient_PartitionRead_RequestOptions(t *testing.T) {
t.Parallel()
server, client, teardown := setupMockedTestServer(t)
defer teardown()
for _, ro := range []ReadOptions{
{},
{Priority: sppb.RequestOptions_PRIORITY_LOW},
{RequestTag: "batch-read-tag"},
{Priority: sppb.RequestOptions_PRIORITY_MEDIUM, RequestTag: "batch-read-with-medium-prio"},
} {
ctx := context.Background()
txn, _ := client.BatchReadOnlyTransaction(ctx, StrongRead())
partitions, _ := txn.PartitionReadWithOptions(ctx, "Albums", KeySets(Key{"foo"}), []string{"SingerId", "AlbumId", "AlbumTitle"}, PartitionOptions{MaxPartitions: 10}, ro)
for _, p := range partitions {
iter := txn.Execute(ctx, p)
iter.Next()
iter.Stop()
}
checkRequestsForExpectedRequestOptions(t, server.TestSpanner, len(partitions), sppb.RequestOptions{RequestTag: ro.RequestTag, Priority: ro.Priority})
}
}
func checkRequestsForExpectedRequestOptions(t *testing.T, server InMemSpannerServer, reqCount int, ro sppb.RequestOptions) {
reqs := drainRequestsFromServer(server)
reqOptions := []*sppb.RequestOptions{}
for _, req := range reqs {
if sqlReq, ok := req.(*sppb.ExecuteSqlRequest); ok {
reqOptions = append(reqOptions, sqlReq.RequestOptions)
}
if batchReq, ok := req.(*sppb.ExecuteBatchDmlRequest); ok {
reqOptions = append(reqOptions, batchReq.RequestOptions)
}
if readReq, ok := req.(*sppb.ReadRequest); ok {
reqOptions = append(reqOptions, readReq.RequestOptions)
}
}
if got, want := len(reqOptions), reqCount; got != want {
t.Fatalf("Requests length mismatch\nGot: %v\nWant: %v", got, want)
}
for _, opts := range reqOptions {
if opts == nil {
opts = &sppb.RequestOptions{}
}
if got, want := opts.Priority, ro.Priority; got != want {
t.Fatalf("Request priority mismatch\nGot: %v\nWant: %v", got, want)
}
if got, want := opts.RequestTag, ro.RequestTag; got != want {
t.Fatalf("Request tag mismatch\nGot: %v\nWant: %v", got, want)
}
if got, want := opts.TransactionTag, ro.TransactionTag; got != want {
t.Fatalf("Transaction tag mismatch\nGot: %v\nWant: %v", got, want)
}
}
}
func checkCommitForExpectedRequestOptions(t *testing.T, server InMemSpannerServer, ro sppb.RequestOptions) {
reqs := drainRequestsFromServer(server)
var commit *sppb.CommitRequest
var ok bool
for _, req := range reqs {
if commit, ok = req.(*sppb.CommitRequest); ok {
break
}
}
if commit == nil {
t.Fatalf("Missing commit request")
}
var got sppb.RequestOptions_Priority
if commit.RequestOptions != nil {
got = commit.RequestOptions.Priority
}
want := ro.Priority
if got != want {
t.Fatalf("Commit priority mismatch\nGot: %v\nWant: %v", got, want)
}
var requestTag string
var transactionTag string
if commit.RequestOptions != nil {
requestTag = commit.RequestOptions.RequestTag
transactionTag = commit.RequestOptions.TransactionTag
}
if got, want := requestTag, ro.RequestTag; got != want {
t.Fatalf("Commit request tag mismatch\nGot: %v\nWant: %v", got, want)
}
if got, want := transactionTag, ro.TransactionTag; got != want {
t.Fatalf("Commit transaction tag mismatch\nGot: %v\nWant: %v", got, want)
}
}
func TestClient_Single_Read_WithNumericKey(t *testing.T) {
t.Parallel()
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
iter := client.Single().Read(ctx, "Albums", KeySets(Key{*big.NewRat(1, 1)}), []string{"SingerId", "AlbumId", "AlbumTitle"})
defer iter.Stop()
rowCount := int64(0)
for {
_, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
rowCount++
}
if rowCount != SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount {
t.Fatalf("row count mismatch\nGot: %v\nWant: %v", rowCount, SelectSingerIDAlbumIDAlbumTitleFromAlbumsRowCount)
}
}
func TestClient_Single_ReadRowWithOptions(t *testing.T) {
t.Parallel()
_, client, teardown := setupMockedTestServer(t)
defer teardown()
ctx := context.Background()
row, err := client.Single().ReadRowWithOptions(ctx, "Albums", Key{"foo"}, []string{"SingerId", "AlbumId", "AlbumTitle"}, &ReadOptions{RequestTag: "foo/bar"})
if err != nil {
t.Fatalf("Unexpected error for read row with options: %v", err)
}
if row == nil {
t.Fatal("ReadRowWithOptions did not return a row")
}
}
func TestClient_CloseWithUnresponsiveBackend(t *testing.T) {
t.Parallel()
minOpened := uint64(5)
server, client, teardown := setupMockedTestServerWithConfig(t,
ClientConfig{
SessionPoolConfig: SessionPoolConfig{
MinOpened: minOpened,
},
})
defer teardown()
sp := client.idleSessions
waitFor(t, func() error {
sp.mu.Lock()
defer sp.mu.Unlock()
if uint64(sp.idleList.Len()) != minOpened {
return fmt.Errorf("num open sessions mismatch\nWant: %d\nGot: %d", sp.MinOpened, sp.numOpened)
}
return nil
})
server.TestSpanner.Freeze()
defer server.TestSpanner.Unfreeze()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
sp.close(ctx)
if w, g := context.DeadlineExceeded, ctx.Err(); w != g {
t.Fatalf("context error mismatch\nWant: %v\nGot: %v", w, g)
}
}
| [
"\"SPANNER_EMULATOR_HOST\""
] | [] | [
"SPANNER_EMULATOR_HOST"
] | [] | ["SPANNER_EMULATOR_HOST"] | go | 1 | 0 | |
test/pkg/suite_init/werf_binary_data.go | package suite_init
import (
"os"
"runtime"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
type WerfBinaryData struct {
WerfBinPath string
}
func NewWerfBinaryData(synchronizedSuiteCallbacksData *SynchronizedSuiteCallbacksData) *WerfBinaryData {
data := &WerfBinaryData{}
synchronizedSuiteCallbacksData.SetSynchronizedBeforeSuiteNode1FuncWithReturnValue(ComputeWerfBinPath)
synchronizedSuiteCallbacksData.AppendSynchronizedBeforeSuiteAllNodesFunc(func(computedPath []byte) {
data.WerfBinPath = string(computedPath)
})
synchronizedSuiteCallbacksData.AppendSynchronizedAfterSuiteNode1Func(gexec.CleanupBuildArtifacts)
return data
}
func ComputeWerfBinPath() []byte {
werfBinPath := os.Getenv("WERF_TEST_BINARY_PATH")
if werfBinPath == "" {
var err error
if runtime.GOOS == "linux" {
werfBinPath, err = gexec.BuildWithEnvironment("github.com/werf/werf/cmd/werf", []string{"CGO_ENABLED=1"}, "-compiler", "gc", "-ldflags", "-linkmode external -extldflags=-static", "-tags", "dfrunmount dfssh containers_image_openpgp osusergo exclude_graphdriver_devicemapper netgo no_devmapper static_build")
} else {
werfBinPath, err = gexec.BuildWithEnvironment("github.com/werf/werf/cmd/werf", nil, "-compiler", "gc", "-tags", "dfrunmount dfssh containers_image_openpgp")
}
Ω(err).ShouldNot(HaveOccurred())
}
return []byte(werfBinPath)
}
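// Hedged note (not part of the original file): setting WERF_TEST_BINARY_PATH lets the
// suite reuse a prebuilt binary instead of compiling werf via gexec, e.g.
//
//	WERF_TEST_BINARY_PATH=/usr/local/bin/werf go test ./test/...
//
// The exact invocation above is an assumption for illustration only.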
| [
"\"WERF_TEST_BINARY_PATH\""
] | [] | [
"WERF_TEST_BINARY_PATH"
] | [] | ["WERF_TEST_BINARY_PATH"] | go | 1 | 0 | |
cmd/buildah/common.go | package main
import (
"context"
"os"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/pkg/umask"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
// configuration, including customizations made in containers.conf
needToShutdownStore = false
)
const (
maxPullPushRetries = 3
pullPushRetryDelay = 2 * time.Second
)
func getStore(c *cobra.Command) (storage.Store, error) {
options, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
if err != nil {
return nil, err
}
if c.Flag("root").Changed || c.Flag("runroot").Changed {
options.GraphRoot = globalFlagResults.Root
options.RunRoot = globalFlagResults.RunRoot
}
if err := setXDGRuntimeDir(); err != nil {
return nil, err
}
if c.Flag("storage-driver").Changed {
options.GraphDriverName = globalFlagResults.StorageDriver
// If any options setup in config, these should be dropped if user overrode the driver
options.GraphDriverOptions = []string{}
}
if c.Flag("storage-opt").Changed {
if len(globalFlagResults.StorageOpts) > 0 {
options.GraphDriverOptions = globalFlagResults.StorageOpts
}
}
	// Do not allow mounting a graphdriver other than vfs if we are creating the userns as part
	// of the mount command.
	// Conversely, allow the mount if we are already in a userns, as the mount point will still
	// be accessible once "buildah mount" exits.
if os.Geteuid() != 0 && options.GraphDriverName != "vfs" {
return nil, errors.Errorf("cannot mount using driver %s in rootless mode. You need to run it in a `buildah unshare` session", options.GraphDriverName)
}
// For uid/gid mappings, first we check the global definitions
if len(globalFlagResults.UserNSUID) > 0 || len(globalFlagResults.UserNSGID) > 0 {
if !(len(globalFlagResults.UserNSUID) > 0 && len(globalFlagResults.UserNSGID) > 0) {
return nil, errors.Errorf("--userns-uid-map and --userns-gid-map must be used together")
}
uopts := globalFlagResults.UserNSUID
gopts := globalFlagResults.UserNSGID
if len(uopts) == 0 {
return nil, errors.New("--userns-uid-map used with no mappings?")
}
if len(gopts) == 0 {
return nil, errors.New("--userns-gid-map used with no mappings?")
}
uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts)
if err != nil {
return nil, err
}
options.UIDMap = uidmap
options.GIDMap = gidmap
}
// If a subcommand has the flags, check if they are set; if so, override the global values
localUIDMapFlag := c.Flags().Lookup("userns-uid-map")
localGIDMapFlag := c.Flags().Lookup("userns-gid-map")
if localUIDMapFlag != nil && localGIDMapFlag != nil && (localUIDMapFlag.Changed || localGIDMapFlag.Changed) {
if !(localUIDMapFlag.Changed && localGIDMapFlag.Changed) {
return nil, errors.Errorf("--userns-uid-map and --userns-gid-map must be used together")
}
// We know that the flags are both !nil and have been changed (i.e. have values)
uopts, _ := c.Flags().GetStringSlice("userns-uid-map")
gopts, _ := c.Flags().GetStringSlice("userns-gid-map")
if len(uopts) == 0 {
return nil, errors.New("--userns-uid-map used with no mappings?")
}
if len(gopts) == 0 {
return nil, errors.New("--userns-gid-map used with no mappings?")
}
uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts)
if err != nil {
return nil, err
}
options.UIDMap = uidmap
options.GIDMap = gidmap
}
umask.CheckUmask()
store, err := storage.GetStore(options)
if store != nil {
is.Transport.SetStore(store)
}
needToShutdownStore = true
return store, err
}
// setXDGRuntimeDir sets XDG_RUNTIME_DIR if it is unset when running rootless
func setXDGRuntimeDir() error {
if unshare.IsRootless() && os.Getenv("XDG_RUNTIME_DIR") == "" {
runtimeDir, err := storage.GetRootlessRuntimeDir(unshare.GetRootlessUID())
if err != nil {
return err
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
return errors.New("could not set XDG_RUNTIME_DIR")
}
}
return nil
}
func openBuilder(ctx context.Context, store storage.Store, name string) (builder *buildah.Builder, err error) {
if name != "" {
builder, err = buildah.OpenBuilder(store, name)
if os.IsNotExist(errors.Cause(err)) {
options := buildah.ImportOptions{
Container: name,
}
builder, err = buildah.ImportBuilder(ctx, store, options)
}
}
if err != nil {
return nil, errors.Wrapf(err, "error reading build container")
}
if builder == nil {
return nil, errors.Errorf("error finding build container")
}
return builder, nil
}
func openBuilders(store storage.Store) (builders []*buildah.Builder, err error) {
return buildah.OpenAllBuilders(store)
}
func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) {
options := buildah.ImportFromImageOptions{
Image: name,
SystemContext: sc,
}
builder, err = buildah.ImportBuilderFromImage(ctx, store, options)
if err != nil {
return nil, errors.Wrapf(err, "error reading image")
}
if builder == nil {
return nil, errors.Errorf("error mocking up build configuration")
}
return builder, nil
}
func getDateAndDigestAndSize(ctx context.Context, sys *types.SystemContext, store storage.Store, storeImage storage.Image) (time.Time, string, int64, error) {
created := time.Time{}
is.Transport.SetStore(store)
storeRef, err := is.Transport.ParseStoreReference(store, storeImage.ID)
if err != nil {
return created, "", -1, err
}
img, err := storeRef.NewImageSource(ctx, nil)
if err != nil {
return created, "", -1, err
}
defer img.Close()
imgSize, sizeErr := store.ImageSize(storeImage.ID)
if sizeErr != nil {
imgSize = -1
}
manifestBytes, _, manifestErr := img.GetManifest(ctx, nil)
manifestDigest := ""
if manifestErr == nil && len(manifestBytes) > 0 {
mDigest, err := manifest.Digest(manifestBytes)
manifestErr = err
if manifestErr == nil {
manifestDigest = mDigest.String()
}
}
inspectable, inspectableErr := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(img, nil))
if inspectableErr == nil && inspectable != nil {
inspectInfo, inspectErr := inspectable.Inspect(ctx)
if inspectErr == nil && inspectInfo != nil && inspectInfo.Created != nil {
created = *inspectInfo.Created
}
}
if sizeErr != nil {
err = sizeErr
} else if manifestErr != nil {
err = manifestErr
}
return created, manifestDigest, imgSize, err
}
// getContext returns a context.TODO
func getContext() context.Context {
return context.TODO()
}
func getUserFlags() pflag.FlagSet {
fs := pflag.FlagSet{}
fs.String("user", "", "`user[:group]` to run the command as")
return fs
}
func defaultFormat() string {
format := os.Getenv("BUILDAH_FORMAT")
if format != "" {
return format
}
return buildah.OCI
}
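// Hedged usage sketch (assumed, not part of the original file): defaultFormat lets the
// manifest format be switched through the environment, falling back to OCI when unset, e.g.
//
//	BUILDAH_FORMAT=docker buildah bud -t example .
//
// The image tag and build command above are illustrative assumptions only.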
// imageIsParent goes through the layers in the store and checks if image.TopLayer is
// the parent of any other layer in the store. It also double-checks that an image
// with that layer exists.
func imageIsParent(ctx context.Context, sc *types.SystemContext, store storage.Store, image *storage.Image) (bool, error) {
children, err := getChildren(ctx, sc, store, image, 1)
if err != nil {
return false, err
}
return len(children) > 0, nil
}
func getImageConfig(ctx context.Context, sc *types.SystemContext, store storage.Store, imageID string) (*imgspecv1.Image, error) {
ref, err := is.Transport.ParseStoreReference(store, imageID)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse reference to image %q", imageID)
}
image, err := ref.NewImage(ctx, sc)
if err != nil {
if img, err2 := store.Image(imageID); err2 == nil && img.ID == imageID {
return nil, nil
}
return nil, errors.Wrapf(err, "unable to open image %q", imageID)
}
config, err := image.OCIConfig(ctx)
defer image.Close()
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", imageID)
}
return config, nil
}
func historiesDiffer(a, b []imgspecv1.History) bool {
if len(a) != len(b) {
return true
}
i := 0
for i < len(a) {
if a[i].Created == nil && b[i].Created != nil {
break
}
if a[i].Created != nil && b[i].Created == nil {
break
}
if a[i].Created != nil && b[i].Created != nil && !a[i].Created.Equal(*(b[i].Created)) {
break
}
if a[i].CreatedBy != b[i].CreatedBy {
break
}
if a[i].Author != b[i].Author {
break
}
if a[i].Comment != b[i].Comment {
break
}
if a[i].EmptyLayer != b[i].EmptyLayer {
break
}
i++
}
return i != len(a)
}
// getParent returns the image's parent image. Return nil if a parent is not found.
func getParent(ctx context.Context, sc *types.SystemContext, store storage.Store, child *storage.Image) (*storage.Image, error) {
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve image list from store")
}
var childTopLayer *storage.Layer
if child.TopLayer != "" {
childTopLayer, err = store.Layer(child.TopLayer)
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve information about layer %s from store", child.TopLayer)
}
}
childConfig, err := getImageConfig(ctx, sc, store, child.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", child.ID)
}
if childConfig == nil {
return nil, nil
}
for _, parent := range images {
if parent.ID == child.ID {
continue
}
if childTopLayer != nil && parent.TopLayer != childTopLayer.Parent && parent.TopLayer != childTopLayer.ID {
continue
}
parentConfig, err := getImageConfig(ctx, sc, store, parent.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", parent.ID)
}
if parentConfig == nil {
continue
}
if len(parentConfig.History)+1 != len(childConfig.History) {
continue
}
if len(parentConfig.RootFS.DiffIDs) > 0 {
if len(childConfig.RootFS.DiffIDs) < len(parentConfig.RootFS.DiffIDs) {
continue
}
childUsesAllParentLayers := true
for i := range parentConfig.RootFS.DiffIDs {
if childConfig.RootFS.DiffIDs[i] != parentConfig.RootFS.DiffIDs[i] {
childUsesAllParentLayers = false
break
}
}
if !childUsesAllParentLayers {
continue
}
}
if historiesDiffer(parentConfig.History, childConfig.History[:len(parentConfig.History)]) {
continue
}
return &parent, nil
}
return nil, nil
}
// getChildren returns a list of the imageIDs that depend on the image
func getChildren(ctx context.Context, sc *types.SystemContext, store storage.Store, parent *storage.Image, max int) ([]string, error) {
var children []string
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve images from store")
}
parentConfig, err := getImageConfig(ctx, sc, store, parent.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", parent.ID)
}
if parentConfig == nil {
return nil, nil
}
for _, child := range images {
if child.ID == parent.ID {
continue
}
var childTopLayer *storage.Layer
if child.TopLayer != "" {
childTopLayer, err = store.Layer(child.TopLayer)
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve information about layer %q from store", child.TopLayer)
}
if childTopLayer.Parent != parent.TopLayer && childTopLayer.ID != parent.TopLayer {
continue
}
}
childConfig, err := getImageConfig(ctx, sc, store, child.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", child.ID)
}
if childConfig == nil {
continue
}
if len(parentConfig.History)+1 != len(childConfig.History) {
continue
}
if historiesDiffer(parentConfig.History, childConfig.History[:len(parentConfig.History)]) {
continue
}
children = append(children, child.ID)
if max > 0 && len(children) >= max {
break
}
}
return children, nil
}
func getFormat(format string) (string, error) {
switch format {
case buildah.OCI:
return buildah.OCIv1ImageManifest, nil
case buildah.DOCKER:
return buildah.Dockerv2ImageManifest, nil
default:
return "", errors.Errorf("unrecognized image type %q", format)
}
}
// Tail returns the string slice after the first element; if there are not
// enough elements, it returns an empty slice. This replaces the urfave/cli
// Tail method for args.
func Tail(a []string) []string {
if len(a) >= 2 {
return a[1:]
}
return []string{}
}
// UsageTemplate returns the usage template for podman commands
// This blocks the displaying of the global options. The main podman
// command should not use this.
func UsageTemplate() string {
return `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
{{end}}
`
}
| [
"\"XDG_RUNTIME_DIR\"",
"\"BUILDAH_FORMAT\""
] | [] | [
"XDG_RUNTIME_DIR",
"BUILDAH_FORMAT"
] | [] | ["XDG_RUNTIME_DIR", "BUILDAH_FORMAT"] | go | 2 | 0 | |
raiden/tests/utils/smoketest.py | import os
import shutil
import sys
import tempfile
import traceback
from http import HTTPStatus
import click
import requests
from eth_utils import (
decode_hex,
encode_hex,
remove_0x_prefix,
to_canonical_address,
to_checksum_address,
)
from web3 import HTTPProvider, Web3
from web3.middleware import geth_poa_middleware
from raiden.accounts import AccountManager
from raiden.connection_manager import ConnectionManager
from raiden.network.proxies import TokenNetworkRegistry
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.utils import get_free_port
from raiden.raiden_service import RaidenService
from raiden.tests.fixtures.variables import DEFAULT_PASSPHRASE
from raiden.tests.utils.geth import (
GethNodeDescription,
geth_node_config,
geth_node_config_set_bootnodes,
geth_node_to_datadir,
geth_run_nodes,
geth_wait_and_check,
)
from raiden.tests.utils.smartcontracts import deploy_contract_web3, deploy_token
from raiden.transfer import channel, views
from raiden.transfer.state import CHANNEL_STATE_OPENED
from raiden.utils import get_project_root, privatekey_to_address
from raiden_contracts.constants import (
CONTRACT_ENDPOINT_REGISTRY,
CONTRACT_SECRET_REGISTRY,
CONTRACT_TOKEN_NETWORK_REGISTRY,
NETWORKNAME_TO_ID,
TEST_SETTLE_TIMEOUT_MAX,
TEST_SETTLE_TIMEOUT_MIN,
)
from raiden_contracts.contract_manager import ContractManager, contracts_precompiled_path
# the smoketest will assert that a different endpoint got successfully registered
TEST_ENDPOINT = '9.9.9.9:9999'
TEST_PARTNER_ADDRESS = '2' * 40
TEST_DEPOSIT_AMOUNT = 5
TEST_PRIVKEY = (
b'\xad\xd4\xd3\x10\xba\x04$hy\x1d\xd7\xbf\x7fn\xae\x85\xac'
b'\xc4\xdd\x14?\xfa\x81\x0e\xf1\x80\x9aj\x11\xf2\xbcD'
)
TEST_ACCOUNT_ADDRESS = privatekey_to_address(TEST_PRIVKEY)
RST_DATADIR = tempfile.mkdtemp()
os.environ['RST_DATADIR'] = RST_DATADIR
def ensure_executable(cmd):
"""look for the given command and make sure it can be executed"""
if not shutil.which(cmd):
print(
'Error: unable to locate %s binary.\n'
'Make sure it is installed and added to the PATH variable.' % cmd,
)
sys.exit(1)
def run_restapi_smoketests():
"""Test if REST api works. """
url = 'http://localhost:{port}/api/v1/channels'.format(port=5001)
response = requests.get(url)
assert response.status_code == HTTPStatus.OK
response_json = response.json()
assert (response_json[0]['partner_address'] ==
to_checksum_address(ConnectionManager.BOOTSTRAP_ADDR))
assert response_json[0]['state'] == 'opened'
assert response_json[0]['balance'] > 0
def run_smoketests(
raiden_service: RaidenService,
transport: str,
token_addresses,
discovery_address,
debug: bool = False,
):
""" Test that the assembled raiden_service correctly reflects the configuration from the
smoketest_genesis. """
try:
chain = raiden_service.chain
token_network_added_events = raiden_service.default_registry.filter_token_added_events()
events_token_addresses = [
event['args']['token_address']
for event in token_network_added_events
]
assert events_token_addresses == token_addresses
if transport == 'udp':
discovery_addresses = list(chain.address_to_discovery.keys())
assert len(discovery_addresses) == 1, repr(chain.address_to_discovery)
assert discovery_addresses[0] == discovery_address
discovery = chain.address_to_discovery[discovery_addresses[0]]
assert discovery.endpoint_by_address(raiden_service.address) != TEST_ENDPOINT
token_networks = views.get_token_network_addresses_for(
views.state_from_raiden(raiden_service),
raiden_service.default_registry.address,
)
assert len(token_networks) == 1
channel_state = views.get_channelstate_for(
views.state_from_raiden(raiden_service),
raiden_service.default_registry.address,
token_networks[0],
decode_hex(TEST_PARTNER_ADDRESS),
)
distributable = channel.get_distributable(
channel_state.our_state,
channel_state.partner_state,
)
assert distributable == TEST_DEPOSIT_AMOUNT
assert distributable == channel_state.our_state.contract_balance
assert channel.get_status(channel_state) == CHANNEL_STATE_OPENED
# Run API test
run_restapi_smoketests()
except: # NOQA pylint: disable=bare-except
error = traceback.format_exc()
if debug:
import pdb
pdb.post_mortem() # pylint: disable=no-member
return error
return None
def deploy_smoketest_contracts(client, chain_id, contract_manager):
client.web3.personal.unlockAccount(
client.web3.eth.accounts[0],
DEFAULT_PASSPHRASE,
)
endpoint_registry_address = deploy_contract_web3(
contract_name=CONTRACT_ENDPOINT_REGISTRY,
deploy_client=client,
contract_manager=contract_manager,
)
secret_registry_address = deploy_contract_web3(
contract_name=CONTRACT_SECRET_REGISTRY,
deploy_client=client,
contract_manager=contract_manager,
)
token_network_registry_address = deploy_contract_web3(
contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
deploy_client=client,
contract_manager=contract_manager,
constructor_arguments=(
to_checksum_address(secret_registry_address),
chain_id,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
),
)
addresses = {
CONTRACT_ENDPOINT_REGISTRY: endpoint_registry_address,
CONTRACT_SECRET_REGISTRY: secret_registry_address,
CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_address,
}
return addresses
def get_private_key(keystore):
accmgr = AccountManager(keystore)
if not accmgr.accounts:
raise RuntimeError("No Ethereum accounts found in the user's system")
addresses = list(accmgr.accounts.keys())
return accmgr.get_privkey(addresses[0], DEFAULT_PASSPHRASE)
def setup_testchain_and_raiden(transport, matrix_server, print_step, contracts_version):
print_step('Starting Ethereum node')
ensure_executable('geth')
free_port = get_free_port('127.0.0.1')
rpc_port = next(free_port)
p2p_port = next(free_port)
base_datadir = os.environ['RST_DATADIR']
description = GethNodeDescription(
private_key=TEST_PRIVKEY,
rpc_port=rpc_port,
p2p_port=p2p_port,
miner=True,
)
eth_rpc_endpoint = f'http://127.0.0.1:{rpc_port}'
web3 = Web3(HTTPProvider(endpoint_uri=eth_rpc_endpoint))
web3.middleware_stack.inject(geth_poa_middleware, layer=0)
config = geth_node_config(
description.private_key,
description.p2p_port,
description.rpc_port,
)
config.update({
'unlock': 0,
'mine': True,
'password': os.path.join(base_datadir, 'pw'),
})
nodes_configuration = [config]
geth_node_config_set_bootnodes(nodes_configuration)
keystore = os.path.join(geth_node_to_datadir(config, base_datadir), 'keystore')
logdir = os.path.join(base_datadir, 'logs')
processes_list = geth_run_nodes(
geth_nodes=[description],
nodes_configuration=nodes_configuration,
base_datadir=base_datadir,
genesis_file=os.path.join(get_project_root(), 'smoketest_genesis.json'),
chain_id=NETWORKNAME_TO_ID['smoketest'],
verbosity=0,
logdir=logdir,
)
try:
# the marker is hardcoded in the genesis file
random_marker = remove_0x_prefix(encode_hex(b'raiden'))
geth_wait_and_check(
web3=web3,
accounts_addresses=[],
random_marker=random_marker,
processes_list=processes_list,
)
except (ValueError, RuntimeError) as e:
        # If geth_wait_and_check throws an exception, make sure we don't end up
        # with a rogue geth process running in the background
for process in processes_list:
process.terminate()
raise e
print_step('Deploying Raiden contracts')
client = JSONRPCClient(web3, get_private_key(keystore))
contract_manager = ContractManager(
contracts_precompiled_path(contracts_version),
)
contract_addresses = deploy_smoketest_contracts(
client=client,
chain_id=NETWORKNAME_TO_ID['smoketest'],
contract_manager=contract_manager,
)
token = deploy_token(
deploy_client=client,
contract_manager=contract_manager,
initial_amount=1000,
decimals=0,
token_name='TKN',
token_symbol='TKN',
)
registry = TokenNetworkRegistry(
jsonrpc_client=client,
registry_address=contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
contract_manager=contract_manager,
)
registry.add_token(
token_address=to_canonical_address(token.contract.address),
given_block_identifier='latest',
)
print_step('Setting up Raiden')
endpoint_registry_contract_address = to_checksum_address(
contract_addresses[CONTRACT_ENDPOINT_REGISTRY],
)
tokennetwork_registry_contract_address = to_checksum_address(
contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
)
secret_registry_contract_address = to_checksum_address(
contract_addresses[CONTRACT_SECRET_REGISTRY],
)
return {
'args': {
'address': to_checksum_address(TEST_ACCOUNT_ADDRESS),
'datadir': keystore,
'endpoint_registry_contract_address': endpoint_registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'gas_price': 'fast',
'keystore_path': keystore,
'matrix_server': matrix_server,
'network_id': str(NETWORKNAME_TO_ID['smoketest']),
'password_file': click.File()(os.path.join(base_datadir, 'pw')),
'tokennetwork_registry_contract_address': tokennetwork_registry_contract_address,
'secret_registry_contract_address': secret_registry_contract_address,
'sync_check': False,
'transport': transport,
},
'contract_addresses': contract_addresses,
'ethereum': processes_list,
'token': token,
}
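# Illustrative sketch (not part of the original module): one way a caller might
# consume the dict returned above. `run_app` is a hypothetical runner that
# accepts the CLI-style arguments; only the keys of the returned dict are taken
# from setup_testchain_and_raiden itself.
def example_run_smoketest(transport, matrix_server, print_step, contracts_version, run_app):
    result = setup_testchain_and_raiden(transport, matrix_server, print_step, contracts_version)
    try:
        # start the application with the generated CLI arguments
        run_app(**result['args'])
    finally:
        # always stop the geth processes started by the setup
        for process in result['ethereum']:
            process.terminate()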
| [] | [] | [
"RST_DATADIR"
] | [] | ["RST_DATADIR"] | python | 1 | 0 | |
pypa/pict.py | import facebook
import os
from notipy.cli import Notipy
import pypapath
import testInternet
NO = '\033[0m' # white (normal)
RED = '\033[31m' # red
def handle():
try:
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
HOME_DIR = os.environ['HOME']
os.chdir(HOME_DIR)
        print('''Do you have an access token?
If not, go to https://developers.facebook.com/tools/explorer
and click the 'Get Token' button, then click 'Get User Access Token'
and under User Data Permissions check 'publish_actions'
''')
imageName = input('Enter the image name with extension: ')
imageLocation = input('Enter the image location: ')
os.chdir(imageLocation)
caption = input('Enter the caption to the image: ')
token = input('Enter Facebook access token: ')
graph = facebook.GraphAPI(access_token = token)
graph.put_photo(image=open(imageName, 'rb'), message= caption)
print('Your photo is uploaded!')
Notipy().send('Your photo is uploaded!')
except FileNotFoundError:
print(RED + 'No such file or directory!' + NO)
except:
print(RED + '\nClosing' + NO)
if not testInternet.is_connected():
print(RED + 'Internet is not working well! Check your connection.' + NO)
exit(0)
handle()
| [] | [] | [
"HOME"
] | [] | ["HOME"] | python | 1 | 0 | |
assets/catnip/main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"code.cloudfoundry.org/clock"
"github.com/cloudfoundry/capi-bara-tests/assets/catnip/router"
)
func main() {
fmt.Printf("listening on port %s...\n", os.Getenv("PORT"))
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", os.Getenv("PORT")), router.New(os.Stdout, clock.NewClock())))
}
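// Illustrative usage (assumption, not part of the original file): the server
// reads its listen port from the PORT environment variable, e.g.
//
//	PORT=8080 go run main.go
//	curl http://localhost:8080/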
| [
"\"PORT\"",
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
db.py | import psycopg2
import os
conn = None
def setup_database():
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
print("creating table if it doesnt exist")
cur.execute("CREATE TABLE IF NOT EXISTS images (id SERIAL PRIMARY KEY, key VARCHAR(255), image BYTEA, guildId VARCHAR(255))")
conn.commit()
except (Exception, psycopg2.Error) as error:
if conn:
print("Failed to setup database tables", error)
finally:
if conn:
cur.close()
conn.close()
def create_connection():
url = os.environ['DATABASE_URL']
if os.getenv("LOCAL"):
return psycopg2.connect(url)
return psycopg2.connect(url, sslmode='require')
def image_exists(key, guildId):
exists = True
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = "SELECT key FROM images WHERE key = %s AND guildId = %s"
params = (key, str(guildId))
cur.execute(postgres_insert_query, params)
rows = cur.fetchall()
exists = len(rows) > 0
except (Exception, psycopg2.Error) as error:
if (conn):
print("Failed to check if key exists: ", error)
finally:
if (conn):
cur.close()
conn.close()
return exists
def insert_image(key, image, guildId):
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = "INSERT INTO images (key, image, guildId) VALUES (%s, %s, %s)"
record_to_insert = (key, image, str(guildId))
cur.execute(postgres_insert_query, record_to_insert)
conn.commit()
print("Added image to db")
except (Exception, psycopg2.Error) as error:
if (conn):
print("Failed to insert record in images table", error)
finally:
if (conn):
cur.close()
conn.close()
def read_image(key, guildId):
    rows = []
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = f"SELECT image FROM images WHERE key = %s AND guildId = %s"
cur.execute(postgres_insert_query, (key, str(guildId)))
rows = cur.fetchall() #get all records
except (Exception, psycopg2.Error) as error:
if conn:
print("Failed to insert record in images table", error)
finally:
if conn:
cur.close()
conn.close()
if len(rows) > 0:
return rows[0][0]
return None
def delete_image(key, guildId):
deleted_rows = 0
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = "DELETE FROM images WHERE key = %s AND guildId = %s"
params = (key, str(guildId))
cur.execute(postgres_insert_query, params)
deleted_rows = cur.rowcount
conn.commit()
print("Deleted image from db")
except (Exception, psycopg2.Error) as error:
if conn:
print("Failed to delete record from images table", error)
finally:
if conn:
cur.close()
conn.close()
return deleted_rows
def update_image(key, image, guildId):
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = "UPDATE images SET image = %s WHERE key = %s AND guildId = %s"
record_to_update = (image, key, str(guildId))
cur.execute(postgres_insert_query, record_to_update)
conn.commit()
print("Image updated")
except (Exception, psycopg2.Error) as error:
if conn:
print("Failed to update record in images table", error)
finally:
if conn:
cur.close()
conn.close()
def list_keys(guildId):
    conn = None
    try:
conn = create_connection()
cur = conn.cursor()
postgres_insert_query = "SELECT key FROM images WHERE guildId = %s"
params = (str(guildId),)
cur.execute(postgres_insert_query, params)
        # fetchall() returns a list of one-element tuples rather than bare keys
        rows = cur.fetchall()
        # so build a flat list containing only the key from each row
keys = []
for row in rows:
keys.append(row[0])
return keys
except (Exception, psycopg2.Error) as error:
if conn:
print("Failed to obtain a list of keys", error)
finally:
if conn:
cur.close()
conn.close() | [] | [] | [
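# Illustrative usage sketch (not part of the original module). It assumes
# DATABASE_URL points at a reachable Postgres instance; the key, image bytes
# and guild id below are placeholders.
if __name__ == "__main__":
    setup_database()
    if not image_exists("logo", 1234):
        insert_image("logo", b"\x89PNG\r\n\x1a\n...", 1234)
    print(list_keys(1234))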
"LOCAL",
"DATABASE_URL"
] | [] | ["LOCAL", "DATABASE_URL"] | python | 2 | 0 | |
pivo/celery.py | # coding: utf-8
from __future__ import absolute_import
import os
from django.apps import apps
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pivo.settings.local")
app = Celery('pivo_tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: [n.name for n in apps.get_app_configs()])
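# Illustrative sketch (assumption, not part of the original file): a minimal
# task that the autodiscovery above would pick up if it lived in an installed
# app's tasks.py module; the worker is then started with something like
# `celery -A pivo worker -l info`.
from celery import shared_task

@shared_task
def example_ping():
    return "pong"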
| [] | [] | [] | [] | [] | python | 0 | 0 | |
grafana/grafana.go | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grafana
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
)
type APIClient struct {
BaseUrl *url.URL
HTTPClient *http.Client
Id int
logger log.Logger
}
type GrafanaDashboard struct {
Id int `json:"id"`
Title string `json:"title"`
Uri string `json:"uri"`
Uid string `json:"uid"`
Type string `json:"type"`
FolderId int `json:"folderId"`
}
type GrafanaDashboardConfigmap struct {
Dashboard struct{ Title string } `json:"dashboard"`
FolderId int `json:"folderId"`
}
// return a list of grafana dashboards
func (c *APIClient) SearchDashboard() ([]GrafanaDashboard, error) {
searchUrl := makeUrl(c.BaseUrl, "/api/search")
resp, err := c.HTTPClient.Get(searchUrl)
if err != nil {
return nil, err
}
defer resp.Body.Close()
searchResult := make([]GrafanaDashboard, 0)
err = json.NewDecoder(resp.Body).Decode(&searchResult)
if err != nil {
return nil, err
}
return searchResult, nil
}
// return a list of grafana datasources
func (c *APIClient) SearchDatasource() ([]map[string]interface{}, error) {
searchUrl := makeUrl(c.BaseUrl, "/api/datasources")
resp, err := c.HTTPClient.Get(searchUrl)
if err != nil {
return nil, err
}
defer resp.Body.Close()
searchResult := make([]map[string]interface{}, 0)
err = json.NewDecoder(resp.Body).Decode(&searchResult)
if err != nil {
level.Error(c.logger).Log("err", err.Error())
return nil, err
}
return searchResult, nil
}
// return a list of notification channels
func (c *APIClient) SearchNotificationChannel() ([]map[string]interface{}, error) {
searchUrl := makeUrl(c.BaseUrl, "/api/alert-notifications")
resp, err := c.HTTPClient.Get(searchUrl)
if err != nil {
return nil, err
}
defer resp.Body.Close()
searchResult := make([]map[string]interface{}, 0)
err = json.NewDecoder(resp.Body).Decode(&searchResult)
if err != nil {
level.Error(c.logger).Log("err", err.Error())
return nil, err
}
return searchResult, nil
}
// return a list of folders
func (c *APIClient) SearchFolder() ([]map[string]interface{}, error) {
searchUrl := makeUrl(c.BaseUrl, "/api/folders")
resp, err := c.HTTPClient.Get(searchUrl)
if err != nil {
return nil, err
}
defer resp.Body.Close()
searchResult := make([]map[string]interface{}, 0)
err = json.NewDecoder(resp.Body).Decode(&searchResult)
if err != nil {
level.Error(c.logger).Log("err", err.Error())
return nil, err
}
return searchResult, nil
}
func (c *APIClient) DeleteDashboard(uid string) error {
deleteUrl := makeUrl(c.BaseUrl, "/api/dashboards/uid/"+uid)
req, err := http.NewRequest("DELETE", deleteUrl, nil)
if err != nil {
return err
}
return c.doRequest(req)
}
func (c *APIClient) DeleteDatasource(datasourceJSON io.Reader) error {
datasource := make(map[string]interface{})
err := json.NewDecoder(datasourceJSON).Decode(&datasource)
if err != nil {
level.Error(c.logger).Log("err", err.Error())
}
deleteUrl := makeUrl(c.BaseUrl, "/api/datasources/name/"+datasource["name"].(string))
req, err := http.NewRequest("DELETE", deleteUrl, nil)
if err != nil {
return err
}
return c.doRequest(req)
}
func (c *APIClient) DeleteNotificationChannel(id int) error {
deleteUrl := makeUrl(c.BaseUrl, "/api/alert-notifications/"+strconv.Itoa(id))
req, err := http.NewRequest("DELETE", deleteUrl, nil)
if err != nil {
return err
}
return c.doRequest(req)
}
func (c *APIClient) UpdateDatasource(id int, datasourceJson io.Reader) error {
updateUrl := makeUrl(c.BaseUrl, "/api/datasources/"+strconv.Itoa(id))
return c.doPut(updateUrl, datasourceJson)
}
func (c *APIClient) UpdateNotificationChannel(id int, notificationChannelJSON io.Reader) error {
updateUrl := makeUrl(c.BaseUrl, "/api/alert-notifications/"+strconv.Itoa(id))
return c.doPut(updateUrl, notificationChannelJSON)
}
func (c *APIClient) CreateDashboard(dashboardJSON io.Reader) error {
return c.doPost(makeUrl(c.BaseUrl, "/api/dashboards/db"), dashboardJSON)
}
func (c *APIClient) CreateDatasource(datasourceJSON io.Reader) error {
return c.doPost(makeUrl(c.BaseUrl, "/api/datasources"), datasourceJSON)
}
func (c *APIClient) CreateNotificationChannel(notificationChannelJSON io.Reader) error {
return c.doPost(makeUrl(c.BaseUrl, "/api/alert-notifications"), notificationChannelJSON)
}
func (c *APIClient) CreateFolder(folderJSON io.Reader) error {
return c.doPost(makeUrl(c.BaseUrl, "/api/folders"), folderJSON)
}
func (c *APIClient) CreateUser(userJSON io.Reader) error {
return c.doPost(makeUrl(c.BaseUrl, "/api/admin/users"), userJSON)
}
func (c *APIClient) doPut(url string, dataJSON io.Reader) error {
req, err := http.NewRequest("PUT", url, dataJSON)
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
if os.Getenv("GRAFANA_BEARER_TOKEN") != "" {
req.Header.Add("Authorization", "Bearer "+os.Getenv("GRAFANA_BEARER_TOKEN"))
}
return c.doRequest(req)
}
func (c *APIClient) doPost(url string, dataJSON io.Reader) error {
req, err := http.NewRequest("POST", url, dataJSON)
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
if os.Getenv("GRAFANA_BEARER_TOKEN") != "" {
req.Header.Add("Authorization", "Bearer "+os.Getenv("GRAFANA_BEARER_TOKEN"))
}
return c.doRequest(req)
}
func (c *APIClient) doRequest(req *http.Request) error {
resp, err := c.HTTPClient.Do(req)
if err != nil {
for strings.Contains(err.Error(), "connection refused") {
level.Error(c.logger).Log("err", err.Error())
level.Info(c.logger).Log("msg", "Perhaps Grafana is not ready. Waiting for 8 seconds and retry again...")
time.Sleep(8 * time.Second)
resp, err = c.HTTPClient.Do(req)
if err == nil {
break
}
}
}
if err != nil {
return err
}
defer resp.Body.Close()
response, _ := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("Unexpected status code returned from Grafana API (got: %d, expected: 200, msg:%s)", resp.StatusCode, string(response))
}
return nil
}
// return a new APIClient
func New(baseUrl *url.URL, id int, logger log.Logger) *APIClient {
return &APIClient{
BaseUrl: baseUrl,
HTTPClient: http.DefaultClient,
Id: id,
logger: logger,
}
}
// build url with grafana url and api endpoint
func makeUrl(baseURL *url.URL, endpoint string) string {
result := *baseURL
result.Path = path.Join(result.Path, endpoint)
return result.String()
}
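// exampleListDashboards is an illustrative sketch (not part of the original
// file): it shows one way a caller could use APIClient. The Grafana URL and
// the no-op logger are placeholder assumptions.
func exampleListDashboards() {
	base, err := url.Parse("http://grafana.example:3000")
	if err != nil {
		return
	}
	client := New(base, 0, log.NewNopLogger())
	// list everything returned by /api/search
	dashboards, err := client.SearchDashboard()
	if err != nil {
		return
	}
	for _, d := range dashboards {
		fmt.Println(d.Uid, d.Title)
	}
}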
| [
"\"GRAFANA_BEARER_TOKEN\"",
"\"GRAFANA_BEARER_TOKEN\"",
"\"GRAFANA_BEARER_TOKEN\"",
"\"GRAFANA_BEARER_TOKEN\""
] | [] | [
"GRAFANA_BEARER_TOKEN"
] | [] | ["GRAFANA_BEARER_TOKEN"] | go | 1 | 0 | |
kushillu/cid/ci.py | import subprocess
import shlex
from time import sleep
import datetime
import shutil
import re
from django.conf import settings
from django.core.files import File
from django.utils import timezone
import thread
import traceback
import tempfile
import pytz
import requests
import os
import zipfile
from kushillu.models import BuildInfo
from . import cidocker, github, common
from settings import MAX_CONCURRENT_BUILDS
def build(bi):
thread.start_new_thread(BuildProcess.start_build, (bi,))
def check(bi):
return BuildProcess.check_docker(bi)
class BuildProcess(object):
def __init__(self, build_info):
assert isinstance(build_info, BuildInfo), 'build_info must be an instance of BuildInfo, not %s' % \
build_info.__class__.__name__
self.build_info = build_info
self.project = build_info.project
self.token = self.project.github_token
self.valid_token = isinstance(self.token, basestring) and len(self.token) > 0
self.badge_updates = self.build_info.on_master
@classmethod
def start_build(cls, build_info):
"""
run the build script.
"""
self = BuildProcess(build_info)
try:
self.build_info.start = datetime.datetime.now().replace(tzinfo=pytz.UTC)
self.build_info.process_log = ''
self._delete_old_containers()
self.build_info.temp_dir = tempfile.mkdtemp(prefix='cid_src_tmp')
self._set_url()
self._log('doing badge updates: %r' % self.badge_updates)
self.build_info.save()
self._update_status('pending', 'CI build underway')
self._set_svg('in_progress')
self.build_info.save()
self._download()
self.build_info.save()
self._zip_save_repo()
self.build_info.save()
self._log('STARTING DOCKER:')
self.build_info.container = cidocker.start_ci(self.project.docker_image, self.build_info.temp_dir)
self.build_info.container_exists = True
self.build_info.save()
while True:
sleep(settings.THREAD_CHECK_RATE)
bi = self._check_docker()
if bi.complete:
break
except (common.KnownError, common.CommandError), e:
self._log('%s: %s' % (e.__class__.__name__, str(e)), '')
self._process_error()
except Exception:
self._log(traceback.format_exc())
self._process_error()
finally:
self.build_info.save()
return self.build_info
@classmethod
def check_docker(cls, build_info):
"""
check status of a build to see if it's finished.
"""
self = BuildProcess(build_info)
bi = self._check_docker()
self._check_queue()
return bi
def _check_docker(self):
if self.build_info.complete:
return self.build_info
try:
if not self.build_info.container_exists:
return self.build_info
status = cidocker.check_progress(self.build_info.container)
if not status:
return self.build_info
exit_code, finished, logs, con_inspection = status
self.build_info.test_success = self.build_info.project.script_split in logs
if self.build_info.test_success:
self.build_info.test_passed = exit_code == 0
process_log, ci_log = logs.split(self.build_info.project.script_split, 1)
self.build_info.process_log += '\n' + process_log
self.build_info.ci_log = ci_log
self.build_info.container_inspection = con_inspection
if self.project.coverage_regex:
m = re.search(self.project.coverage_regex, self.build_info.ci_log)
if m:
try:
self.build_info.coverage = float(m.groups()[0])
except (ValueError, IndexError):
pass
else:
self.build_info.process_log += '\n' + logs
self._log('DOCKER FINISHED:')
shutil.rmtree(self.build_info.temp_dir, ignore_errors=True)
self.build_info.complete = True
self.build_info.finished = finished
if self.build_info.test_passed:
msg = 'CI Success'
if isinstance(self.build_info.coverage, float):
msg += ', %0.2f%% coverage' % self.build_info.coverage
self._update_status('success', msg)
else:
self._update_status('failure', 'Tests failed')
self._set_svg(self.build_info.test_passed)
except common.KnownError, e:
raise e
except Exception:
self._log(traceback.format_exc())
self._process_error()
finally:
self.build_info.save()
return self.build_info
def _delete_old_containers(self):
delay = settings.CONTAINER_DELETE_MINUTES
if delay < 0:
self._log('Not deleting old containers.')
return
n = datetime.datetime.now().replace(tzinfo=pytz.UTC) - datetime.timedelta(minutes=delay)
del_con_ids = BuildInfo.objects.filter(finished__lt=n).values_list('container', flat=True)
deleted_cons = cidocker.delete_old_containers(del_con_ids)
BuildInfo.objects.filter(container__in=deleted_cons).update(container_exists=False)
self._log('%d old containers deleted.' % len(deleted_cons))
def _process_error(self):
self._update_status('error', 'Error running tests')
self._set_svg(False)
if self.build_info.temp_dir:
shutil.rmtree(self.build_info.temp_dir, ignore_errors=True)
self.build_info.test_success = False
self.build_info.complete = True
self.build_info.finished = timezone.now()
@staticmethod
def _check_queue():
"""
Check if a new build can begin, if so start them
"""
if BuildInfo.objects.filter(complete=False, queued=False).count() < MAX_CONCURRENT_BUILDS:
queue_first = BuildInfo.objects.filter(queued=True).order_by('id').first()
if queue_first:
queue_first.queued = False
queue_first.save()
build(queue_first)
def _set_url(self):
"""
generate the url which will be used to clone the repo.
"""
token = ''
if self.project.private and self.valid_token:
token = self.token + '@'
self.url = 'https://%sgithub.com/%s/%s.git' % (token, self.project.github_user, self.project.github_repo)
self._log('clone url: %s' % self.url)
def _update_status(self, status, message):
assert status in ['pending', 'success', 'error', 'failure']
if not self.build_info.status_url or not settings.SET_STATUS:
return
if not self.valid_token:
self._log('WARNING: no valid token found, cannot update status of pull request')
return
payload = {
'state': status,
'description': message,
'context': common.UPDATE_CONTEXT,
'target_url': self.build_info.project.update_url + str(self.build_info.id)
}
_, r = github.github_api(
url=self.build_info.status_url,
token=self.token,
method=requests.post,
data=payload,
extra_headers={'Content-type': 'application/json'})
self._log('updated pull request, status "%s", response: %d' % (status, r.status_code))
if r.status_code != 201:
self._log('received unexpected status code, response code')
self._log('response headers: %r' % r.headers)
self._log('url posted to: %s' % self.build_info.status_url)
self._log('payload: %r' % payload)
self._log('text: %r' % r.text[:1000])
def _download(self):
self._log('cloning...')
commands = 'git clone %s %s' % (self.url, self.build_info.temp_dir)
self._execute(commands)
self._log('cloned code successfully')
if self.build_info.fetch_cmd:
self._log('fetching branch ' + self.build_info.fetch_cmd)
commands = ['git fetch origin ' + self.build_info.fetch_cmd]
if self.build_info.fetch_branch:
commands.append('git checkout ' + self.build_info.fetch_branch)
self._execute(commands)
if self.build_info.sha:
self._log('checkout out ' + self.build_info.sha)
self._execute('git checkout ' + self.build_info.sha)
def _zip_save_repo(self):
self._log('zipping repo...')
count = 0
with tempfile.TemporaryFile(suffix='.zip') as temp_file:
with zipfile.ZipFile(temp_file, 'w') as ztemp_file:
for root, dirs, files in os.walk(self.build_info.temp_dir):
for f in files:
full_path = os.path.join(root, f)
local_path = full_path.replace(self.build_info.temp_dir, '').lstrip('/')
ztemp_file.write(full_path, local_path)
count += 1
self._log('zipped %d files to archive, saving zip file...' % count)
self.build_info.archive.save(temp_file.name, File(temp_file))
def _execute(self, commands):
if isinstance(commands, basestring):
commands = [commands]
for command in commands:
if command.strip().startswith('#'):
self._log(command, 'SKIP> ')
continue
self._log(command, 'EXEC> ')
cargs = shlex.split(command)
try:
                cienv = {}  # os.environ.copy()
cienv['CIDONKEY'] = '1'
p = subprocess.Popen(cargs,
cwd=self.build_info.temp_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=cienv)
stdout, stderr = p.communicate()
if len(stdout) > 0:
self._log(stdout, '')
if p.returncode != 0:
raise common.CommandError(stderr)
elif len(stderr) > 0:
self._log(stderr)
except common.CommandError, e:
raise e
except Exception, e:
raise common.KnownError('%s: %s' % (e.__class__.__name__, str(e)))
def _set_svg(self, status):
if not self.badge_updates:
return
if status == 'in_progress':
status_svg = 'in_progress.svg'
else:
status_svg = 'passing.svg' if status else 'failing.svg'
self._log('setting status svg to %s' % status_svg)
self.project.status_svg = status_svg
self.project.save()
def _message(self, message):
if not message.endswith('\n'):
message += '\n'
self.build_info.process_log += message
def _log(self, line, prefix='#> '):
self._message(prefix + line.strip('\n\r \t'))
| [] | [] | [] | [] | [] | python | 0 | 0 | |
app.py | #!/usr/bin/env python
# This file may be used instead of Apache mod_wsgi to run your python
# web application in a different framework. A few examples are
# provided (cherrypy, gevent), but this file may be altered to run
# whatever framework is desired - or a completely customized service.
#
import imp
import os
import sys
try:
virtenv = os.path.join(
os.environ.get(
'OPENSHIFT_PYTHON_DIR',
'.'),
'virtenv')
python_version = "python" + \
str(sys.version_info[0]) + "." + str(sys.version_info[1])
os.environ['PYTHON_EGG_CACHE'] = os.path.join(
virtenv, 'lib', python_version, 'site-packages')
virtualenv = os.path.join(virtenv, 'bin', 'activate_this.py')
if(sys.version_info[0] < 3):
execfile(virtualenv, dict(__file__=virtualenv))
else:
exec(open(virtualenv).read(), dict(__file__=virtualenv))
except IOError:
pass
#
# IMPORTANT: Put any additional includes below this line. If placed above this
# line, it's possible required libraries won't be in your searchable path
#
#
# main():
#
if __name__ == '__main__':
application = imp.load_source('app', 'flaskapp.py')
port = application.app.config['PORT']
ip = application.app.config['IP']
app_name = application.app.config['APP_NAME']
host_name = application.app.config['HOST_NAME']
fwtype = "wsgiref"
for fw in ("gevent", "cherrypy", "flask"):
try:
imp.find_module(fw)
fwtype = fw
except ImportError:
pass
print(
'Starting WSGIServer type %s on %s:%d ... ' %
(fwtype, ip, port))
if fwtype == "gevent":
from gevent.pywsgi import WSGIServer
WSGIServer((ip, port), application.app).serve_forever()
elif fwtype == "cherrypy":
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(
(ip, port), application.app, server_name=host_name)
server.start()
elif fwtype == "flask":
from flask import Flask
server = Flask(__name__)
server.wsgi_app = application.app
server.run(host=ip, port=port)
else:
from wsgiref.simple_server import make_server
make_server(ip, port, application.app).serve_forever()
| [] | [] | [
"PYTHON_EGG_CACHE",
"OPENSHIFT_PYTHON_DIR"
] | [] | ["PYTHON_EGG_CACHE", "OPENSHIFT_PYTHON_DIR"] | python | 2 | 0 | |
real/experiments/jare.py | import os
from subprocess import call
import sys
import time
# Job id and gpu_id
if len(sys.argv) > 2:
job_id = int(sys.argv[1])
gpu_id = str(sys.argv[2])
print('job_id: {}, gpu_id: {}'.format(job_id, gpu_id))
elif len(sys.argv) > 1:
job_id = int(sys.argv[1])
gpu_id = '5'
print('job_id: {}, missing gpu_id (use default {})'.format(job_id, gpu_id))
else:
print('Missing argument: job_id and gpu_id.')
quit()
# Executables
executable = 'python3'
# Arguments
architecture = ['conv4', 'conv4_nobn', 'dcgan4_nobn', 'dcgan4_nobn', 'resnet_v1', 'resnet_v2']
gantype = ['standard', 'standard', 'standard', 'standard', 'standard', 'standard']
opt_type = ['adam', 'adam', 'adam', 'rmsprop', 'adam', 'adam']
reg_param = ['100', '100', '100', '100', '100', '100']
lr = ['1e-4', '1e-4', '1e-4', '1e-4', '1e-4', '1e-4']
beta1 = ['0.5', '0.5', '0.5', '0.5', '0.5', '0.5']
bs = '64'
z_dim = '128'
beta2 = '0.999'
seed = '125'
gf_dim = '64'
df_dim = '64'
# Paths
rootdir = '..'
scriptname = 'run.py'
cwd = os.path.dirname(os.path.abspath(__file__))
outdir = os.path.join(cwd, 'out', time.strftime("%Y%m%d"),
'jare_{}_{}_{}_bs{}_zdim{}_lr{}_beta1#{}_beta2#{}_gfdim{}_dfdim{}_reg{}_seed{}'.format(
architecture[job_id], gantype[job_id],
opt_type[job_id], bs, z_dim,
lr[job_id], beta1[job_id], beta2, gf_dim, df_dim, reg_param[job_id], seed))
args = [
# Architecture
'--image-size', '375',
'--output-size', '32',
'--beta1', beta1[job_id],
'--beta2', beta2,
'--c-dim', '3',
'--z-dim', z_dim,
'--gf-dim', gf_dim,
'--df-dim', df_dim,
'--reg-param', reg_param[job_id],
'--g-architecture', architecture[job_id],
'--d-architecture', architecture[job_id],
'--gan-type', gantype[job_id],
# Training
'--seed', seed,
'--optimizer', 'jare',
'--opt-type', opt_type[job_id],
'--nsteps', '500000',
'--ntest', '5000',
'--learning-rate', lr[job_id],
'--batch-size', bs,
'--log-dir', os.path.join(outdir, 'tf_logs'),
'--sample-dir', os.path.join(outdir, 'samples'),
'--is-inception-scores',
'--fid-type', '1',
'--inception-dir', './inception',
# Data set
'--dataset', 'cifar-10',
'--data-dir', './data',
'--split', 'train'
]
# Run
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
my_env = os.environ.copy()
call([executable, scriptname] + args, env=my_env, cwd=rootdir)
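# Illustrative invocation (derived from the argument parsing above): run job 2
# on GPU 3 from the experiments directory with
#   python3 jare.py 2 3
# which in turn executes `python3 run.py ...` from the repository root.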
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/cluster-node-tuning-operator/main.go | package main
import (
"context"
"os"
"runtime"
"strconv"
"time"
stub "github.com/openshift/cluster-node-tuning-operator/pkg/stub"
sdk "github.com/operator-framework/operator-sdk/pkg/sdk"
k8sutil "github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/sirupsen/logrus"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
const (
resyncPeriodDefault int64 = 60
)
func printVersion() {
logrus.Infof("Go Version: %s", runtime.Version())
logrus.Infof("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)
logrus.Infof("operator-sdk Version: %v", sdkVersion.Version)
}
func main() {
var resyncPeriodDuration int64 = resyncPeriodDefault
printVersion()
sdk.ExposeMetricsPort()
resource := "tuned.openshift.io/v1alpha1"
kind := "Tuned"
namespace, err := k8sutil.GetWatchNamespace()
if err != nil {
logrus.Fatalf("failed to get watch namespace: %v", err)
}
if os.Getenv("RESYNC_PERIOD") != "" {
resyncPeriodDuration, err = strconv.ParseInt(os.Getenv("RESYNC_PERIOD"), 10, 64)
if err != nil {
logrus.Errorf("Cannot parse RESYNC_PERIOD (%s), using %d", os.Getenv("RESYNC_PERIOD"), resyncPeriodDefault)
resyncPeriodDuration = resyncPeriodDefault
}
}
resyncPeriod := time.Duration(resyncPeriodDuration) * time.Second
logrus.Infof("Watching %s, %s, %s, %d", resource, kind, namespace, resyncPeriod)
sdk.Watch(resource, kind, namespace, resyncPeriod)
sdk.Handle(stub.NewHandler())
sdk.Run(context.TODO())
}
| [
"\"RESYNC_PERIOD\"",
"\"RESYNC_PERIOD\"",
"\"RESYNC_PERIOD\""
] | [] | [
"RESYNC_PERIOD"
] | [] | ["RESYNC_PERIOD"] | go | 1 | 0 | |
pkg/rpc/legacy/server.go | // Copyright (c) 2013-2017 The btcsuite developers
package legacyrpc
import (
"crypto/sha256"
"crypto/subtle"
"encoding/base64"
js "encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"sync"
"sync/atomic"
"time"
"git.parallelcoin.io/dev/pod/pkg/rpc/json"
cl "git.parallelcoin.io/dev/pod/pkg/util/cl"
"git.parallelcoin.io/dev/pod/pkg/wallet"
chain "git.parallelcoin.io/dev/pod/pkg/wallet/chain"
"github.com/btcsuite/websocket"
)
type websocketClient struct {
conn *websocket.Conn
authenticated bool
remoteAddr string
allRequests chan []byte
responses chan []byte
quit chan struct{} // closed on disconnect
wg sync.WaitGroup
}
func newWebsocketClient(
c *websocket.Conn, authenticated bool, remoteAddr string) *websocketClient {
return &websocketClient{
conn: c,
authenticated: authenticated,
remoteAddr: remoteAddr,
allRequests: make(chan []byte),
responses: make(chan []byte),
quit: make(chan struct{}),
}
}
func (c *websocketClient) send(b []byte) error {
select {
case c.responses <- b:
return nil
case <-c.quit:
return errors.New("websocket client disconnected")
}
}
// Server holds the items the RPC server may need to access (auth,
// config, shutdown, etc.)
type Server struct {
httpServer http.Server
wallet *wallet.Wallet
walletLoader *wallet.Loader
chainClient chain.Interface
handlerLookup func(string) (requestHandler, bool)
handlerMu sync.Mutex
listeners []net.Listener
authsha [sha256.Size]byte
upgrader websocket.Upgrader
maxPostClients int64 // Max concurrent HTTP POST clients.
maxWebsocketClients int64 // Max concurrent websocket clients.
wg sync.WaitGroup
quit chan struct{}
quitMtx sync.Mutex
requestShutdownChan chan struct{}
}
// jsonAuthFail sends a message back to the client if the http auth is rejected.
func jsonAuthFail(
w http.ResponseWriter) {
w.Header().Add("WWW-Authenticate", `Basic realm="mod RPC"`)
http.Error(w, "401 Unauthorized.", http.StatusUnauthorized)
}
// NewServer creates a new server for serving legacy RPC client connections,
// both HTTP POST and websocket.
func NewServer(
opts *Options, walletLoader *wallet.Loader, listeners []net.Listener) *Server {
serveMux := http.NewServeMux()
const rpcAuthTimeoutSeconds = 10
server := &Server{
httpServer: http.Server{
Handler: serveMux,
// Timeout connections which don't complete the initial
// handshake within the allowed timeframe.
ReadTimeout: time.Second * rpcAuthTimeoutSeconds,
},
walletLoader: walletLoader,
maxPostClients: opts.MaxPOSTClients,
maxWebsocketClients: opts.MaxWebsocketClients,
listeners: listeners,
// A hash of the HTTP basic auth string is used for a constant
// time comparison.
authsha: sha256.Sum256(httpBasicAuth(opts.Username, opts.Password)),
upgrader: websocket.Upgrader{
// Allow all origins.
CheckOrigin: func(r *http.Request) bool { return true },
},
quit: make(chan struct{}),
requestShutdownChan: make(chan struct{}, 1),
}
serveMux.Handle("/", throttledFn(opts.MaxPOSTClients,
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Connection", "close")
w.Header().Set("Content-Type", "application/json")
r.Close = true
if err := server.checkAuthHeader(r); err != nil {
log <- cl.Wrn("unauthorized client connection attempt")
jsonAuthFail(w)
return
}
server.wg.Add(1)
server.postClientRPC(w, r)
server.wg.Done()
}))
serveMux.Handle("/ws", throttledFn(opts.MaxWebsocketClients,
func(w http.ResponseWriter, r *http.Request) {
authenticated := false
switch server.checkAuthHeader(r) {
case nil:
authenticated = true
case ErrNoAuth:
// nothing
default:
// If auth was supplied but incorrect, rather than simply
// being missing, immediately terminate the connection.
log <- cl.Wrn("disconnecting improperly authorized websocket client")
jsonAuthFail(w)
return
}
conn, err := server.upgrader.Upgrade(w, r, nil)
if err != nil {
log <- cl.Warnf{
"cannot websocket upgrade client %s: %v",
r.RemoteAddr, err,
}
return
}
wsc := newWebsocketClient(conn, authenticated, r.RemoteAddr)
server.websocketClientRPC(wsc)
}))
for _, lis := range listeners {
server.serve(lis)
}
return server
}
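// exampleServe is an illustrative sketch (assumption, not part of the original
// file): it wires a TCP listener and a wallet loader into NewServer. The
// address and credential values are placeholders.
func exampleServe(loader *wallet.Loader) (*Server, error) {
	lis, err := net.Listen("tcp", "127.0.0.1:8332")
	if err != nil {
		return nil, err
	}
	opts := &Options{
		Username:            "rpcuser",
		Password:            "rpcpass",
		MaxPOSTClients:      10,
		MaxWebsocketClients: 10,
	}
	// NewServer begins serving on the supplied listeners immediately.
	return NewServer(opts, loader, []net.Listener{lis}), nil
}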
// httpBasicAuth returns the UTF-8 bytes of the HTTP Basic authentication
// string:
//
// "Basic " + base64(username + ":" + password)
func httpBasicAuth(
username, password string) []byte {
const header = "Basic "
base64 := base64.StdEncoding
b64InputLen := len(username) + len(":") + len(password)
b64Input := make([]byte, 0, b64InputLen)
b64Input = append(b64Input, username...)
b64Input = append(b64Input, ':')
b64Input = append(b64Input, password...)
output := make([]byte, len(header)+base64.EncodedLen(b64InputLen))
copy(output, header)
base64.Encode(output[len(header):], b64Input)
return output
}
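// exampleAuthHeader is an illustrative sketch (not part of the original file):
// it attaches the value produced by httpBasicAuth to a request. For the
// placeholder pair "user"/"pass" the header value is "Basic dXNlcjpwYXNz".
func exampleAuthHeader(req *http.Request) {
	value := httpBasicAuth("user", "pass")
	req.Header.Set("Authorization", string(value))
}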
// serve serves HTTP POST and websocket RPC for the legacy JSON-RPC RPC server.
// This function does not block on lis.Accept.
func (s *Server) serve(lis net.Listener) {
s.wg.Add(1)
go func() {
log <- cl.Infof{"RPC server listening on %s", lis.Addr()}
err := s.httpServer.Serve(lis)
log <- cl.Tracef{"finished serving RPC: %v", err}
s.wg.Done()
}()
}
// RegisterWallet associates the legacy RPC server with the wallet. This
// function must be called before any wallet RPCs can be called by clients.
func (s *Server) RegisterWallet(w *wallet.Wallet) {
s.handlerMu.Lock()
s.wallet = w
s.handlerMu.Unlock()
}
// Stop gracefully shuts down the rpc server by stopping and disconnecting all
// clients, disconnecting the chain server connection, and closing the wallet's
// account files. This blocks until shutdown completes.
func (s *Server) Stop() {
s.quitMtx.Lock()
select {
case <-s.quit:
s.quitMtx.Unlock()
return
default:
}
// Stop the connected wallet and chain server, if any.
s.handlerMu.Lock()
wallet := s.wallet
chainClient := s.chainClient
s.handlerMu.Unlock()
if wallet != nil {
wallet.Stop()
}
if chainClient != nil {
chainClient.Stop()
}
// Stop all the listeners.
for _, listener := range s.listeners {
err := listener.Close()
if err != nil {
log <- cl.Errorf{
"cannot close listener `%s`: %v",
listener.Addr(), err,
}
}
}
// Signal the remaining goroutines to stop.
close(s.quit)
s.quitMtx.Unlock()
// First wait for the wallet and chain server to stop, if they
// were ever set.
if wallet != nil {
wallet.WaitForShutdown()
}
if chainClient != nil {
chainClient.WaitForShutdown()
}
// Wait for all remaining goroutines to exit.
s.wg.Wait()
}
// SetChainServer sets the chain server client component needed to run a fully
// functional bitcoin wallet RPC server. This can be called to enable RPC
// passthrough even before a loaded wallet is set, but the wallet's RPC client
// is preferred.
func (s *Server) SetChainServer(chainClient chain.Interface) {
s.handlerMu.Lock()
s.chainClient = chainClient
s.handlerMu.Unlock()
}
// handlerClosure creates a closure function for handling requests of the given
// method. This may be a request that is handled directly by btcwallet, or
// a chain server request that is handled by passing the request down to pod.
//
// NOTE: These handlers do not handle special cases, such as the authenticate
// method. Each of these must be checked beforehand (the method is already
// known) and handled accordingly.
func (s *Server) handlerClosure(request *json.Request) lazyHandler {
s.handlerMu.Lock()
// With the lock held, make copies of these pointers for the closure.
wallet := s.wallet
chainClient := s.chainClient
if wallet != nil && chainClient == nil {
chainClient = wallet.ChainClient()
s.chainClient = chainClient
}
s.handlerMu.Unlock()
return lazyApplyHandler(request, wallet, chainClient)
}
// ErrNoAuth represents an error where authentication could not succeed
// due to a missing Authorization HTTP header.
var ErrNoAuth = errors.New("no auth")
// checkAuthHeader checks the HTTP Basic authentication supplied by a client
// in the HTTP request r. It errors with ErrNoAuth if the request does not
// contain the Authorization header, or another non-nil error if the
// authentication was provided but incorrect.
//
// This check is time-constant.
func (s *Server) checkAuthHeader(r *http.Request) error {
authhdr := r.Header["Authorization"]
if len(authhdr) == 0 {
return ErrNoAuth
}
authsha := sha256.Sum256([]byte(authhdr[0]))
cmp := subtle.ConstantTimeCompare(authsha[:], s.authsha[:])
if cmp != 1 {
return errors.New("bad auth")
}
return nil
}
// throttledFn wraps an http.HandlerFunc with throttling of concurrent active
// clients by responding with an HTTP 429 when the threshold is crossed.
func throttledFn(
threshold int64, f http.HandlerFunc) http.Handler {
return throttled(threshold, f)
}
// throttled wraps an http.Handler with throttling of concurrent active
// clients by responding with an HTTP 429 when the threshold is crossed.
func throttled(
threshold int64, h http.Handler) http.Handler {
var active int64
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
current := atomic.AddInt64(&active, 1)
defer atomic.AddInt64(&active, -1)
if current-1 >= threshold {
log <- cl.Warnf{
"reached threshold of %d concurrent active clients", threshold,
}
http.Error(w, "429 Too Many Requests", 429)
return
}
h.ServeHTTP(w, r)
})
}
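// exampleThrottledMux is an illustrative sketch (assumption, not part of the
// original file): it wraps a plain handler so that at most 10 clients are
// served concurrently; additional clients receive HTTP 429.
func exampleThrottledMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.Handle("/rpc", throttledFn(10, func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	}))
	return mux
}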
// sanitizeRequest returns a sanitized string for the request which may be
// safely logged. It is intended to strip private keys, passphrases, and any
// other secrets from request parameters before they may be saved to a log file.
func sanitizeRequest(
r *json.Request) string {
// These are considered unsafe to log, so sanitize parameters.
switch r.Method {
case "encryptwallet", "importprivkey", "importwallet",
"signrawtransaction", "walletpassphrase",
"walletpassphrasechange":
return fmt.Sprintf(`{"id":%v,"method":"%s","params":SANITIZED %d parameters}`,
r.ID, r.Method, len(r.Params))
}
return fmt.Sprintf(`{"id":%v,"method":"%s","params":%v}`, r.ID,
r.Method, r.Params)
}
// idPointer returns a pointer to the passed ID, or nil if the interface is nil.
// Interface pointers are usually a red flag of doing something incorrectly,
// but this is only implemented here to work around an oddity with json,
// which uses empty interface pointers for response IDs.
func idPointer(
id interface{}) (p *interface{}) {
if id != nil {
p = &id
}
return
}
// invalidAuth checks whether a websocket request is a valid (parsable)
// authenticate request and checks the supplied username and passphrase
// against the server auth.
func (s *Server) invalidAuth(req *json.Request) bool {
cmd, err := json.UnmarshalCmd(req)
if err != nil {
return false
}
authCmd, ok := cmd.(*json.AuthenticateCmd)
if !ok {
return false
}
// Check credentials.
login := authCmd.Username + ":" + authCmd.Passphrase
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(login))
authSha := sha256.Sum256([]byte(auth))
return subtle.ConstantTimeCompare(authSha[:], s.authsha[:]) != 1
}
func (s *Server) websocketClientRead(wsc *websocketClient) {
for {
_, request, err := wsc.conn.ReadMessage()
if err != nil {
if err != io.EOF && err != io.ErrUnexpectedEOF {
log <- cl.Warnf{
"websocket receive failed from client %s: %v",
wsc.remoteAddr, err,
}
}
close(wsc.allRequests)
break
}
wsc.allRequests <- request
}
}
func (s *Server) websocketClientRespond(wsc *websocketClient) {
// A for-select with a read of the quit channel is used instead of a
// for-range to provide clean shutdown. This is necessary due to
// WebsocketClientRead (which sends to the allRequests chan) not closing
// allRequests during shutdown if the remote websocket client is still
// connected.
out:
for {
select {
case reqBytes, ok := <-wsc.allRequests:
if !ok {
// client disconnected
break out
}
var req json.Request
err := js.Unmarshal(reqBytes, &req)
if err != nil {
if !wsc.authenticated {
// Disconnect immediately.
break out
}
resp := makeResponse(req.ID, nil,
json.ErrRPCInvalidRequest)
mresp, err := js.Marshal(resp)
// We expect the marshal to succeed. If it
// doesn't, it indicates some non-marshalable
// type in the response.
if err != nil {
panic(err)
}
err = wsc.send(mresp)
if err != nil {
break out
}
continue
}
if req.Method == "authenticate" {
if wsc.authenticated || s.invalidAuth(&req) {
// Disconnect immediately.
break out
}
wsc.authenticated = true
resp := makeResponse(req.ID, nil, nil)
// Expected to never fail.
mresp, err := js.Marshal(resp)
if err != nil {
panic(err)
}
err = wsc.send(mresp)
if err != nil {
break out
}
continue
}
if !wsc.authenticated {
// Disconnect immediately.
break out
}
switch req.Method {
case "stop":
resp := makeResponse(req.ID,
"wallet stopping.", nil)
mresp, err := js.Marshal(resp)
// Expected to never fail.
if err != nil {
panic(err)
}
err = wsc.send(mresp)
if err != nil {
break out
}
s.requestProcessShutdown()
break
default:
req := req // Copy for the closure
f := s.handlerClosure(&req)
wsc.wg.Add(1)
go func() {
resp, jsonErr := f()
mresp, err := json.MarshalResponse(req.ID, resp, jsonErr)
if err != nil {
log <- cl.Error{
"unable to marshal response:", err,
}
} else {
_ = wsc.send(mresp)
}
wsc.wg.Done()
}()
}
case <-s.quit:
break out
}
}
// allow client to disconnect after all handler goroutines are done
wsc.wg.Wait()
close(wsc.responses)
s.wg.Done()
}
func (s *Server) websocketClientSend(wsc *websocketClient) {
const deadline time.Duration = 2 * time.Second
out:
for {
select {
case response, ok := <-wsc.responses:
if !ok {
// client disconnected
break out
}
err := wsc.conn.SetWriteDeadline(time.Now().Add(deadline))
if err != nil {
log <- cl.Warnf{
"cannot set write deadline on client %s: %v",
wsc.remoteAddr, err,
}
}
err = wsc.conn.WriteMessage(websocket.TextMessage,
response)
if err != nil {
log <- cl.Warnf{
"failed websocket send to client %s: %v", wsc.remoteAddr, err,
}
break out
}
case <-s.quit:
break out
}
}
close(wsc.quit)
log <- cl.Info{
"disconnected websocket client", wsc.remoteAddr,
}
s.wg.Done()
}
// websocketClientRPC starts the goroutines to serve JSON-RPC requests over a
// websocket connection for a single client.
func (s *Server) websocketClientRPC(wsc *websocketClient) {
log <- cl.Infof{
"new websocket client", wsc.remoteAddr,
}
// Clear the read deadline set before the websocket hijacked
// the connection.
if err := wsc.conn.SetReadDeadline(time.Time{}); err != nil {
log <- cl.Warn{
"cannot remove read deadline:", err,
}
}
// WebsocketClientRead is intentionally not run with the waitgroup
// so it is ignored during shutdown. This is to prevent a hang during
// shutdown where the goroutine is blocked on a read of the
// websocket connection if the client is still connected.
go s.websocketClientRead(wsc)
s.wg.Add(2)
go s.websocketClientRespond(wsc)
go s.websocketClientSend(wsc)
<-wsc.quit
}
// maxRequestSize specifies the maximum number of bytes in the request body
// that may be read from a client. This is currently limited to 4MB.
const maxRequestSize = 1024 * 1024 * 4
// postClientRPC processes and replies to a JSON-RPC client request.
func (s *Server) postClientRPC(w http.ResponseWriter, r *http.Request) {
body := http.MaxBytesReader(w, r.Body, maxRequestSize)
rpcRequest, err := ioutil.ReadAll(body)
if err != nil {
// TODO: what if the underlying reader errored?
http.Error(w, "413 Request Too Large.",
http.StatusRequestEntityTooLarge)
return
}
// First check whether wallet has a handler for this request's method.
// If unfound, the request is sent to the chain server for further
// processing. While checking the methods, disallow authenticate
// requests, as they are invalid for HTTP POST clients.
var req json.Request
err = js.Unmarshal(rpcRequest, &req)
if err != nil {
resp, err := json.MarshalResponse(req.ID, nil, json.ErrRPCInvalidRequest)
if err != nil {
log <- cl.Error{
"Unable to marshal response:", err,
}
http.Error(w, "500 Internal Server Error",
http.StatusInternalServerError)
return
}
_, err = w.Write(resp)
if err != nil {
log <- cl.Warn{
"cannot write invalid request request to client:", err,
}
}
return
}
// Create the response and error from the request. Two special cases
// are handled for the authenticate and stop request methods.
var res interface{}
var jsonErr *json.RPCError
var stop bool
switch req.Method {
case "authenticate":
// Drop it.
return
case "stop":
stop = true
res = "mod stopping"
default:
res, jsonErr = s.handlerClosure(&req)()
}
// Marshal and send.
mresp, err := json.MarshalResponse(req.ID, res, jsonErr)
if err != nil {
log <- cl.Error{
"unable to marshal response:", err,
}
http.Error(w, "500 Internal Server Error", http.StatusInternalServerError)
return
}
_, err = w.Write(mresp)
if err != nil {
log <- cl.Warn{
"unable to respond to client:", err,
}
}
if stop {
s.requestProcessShutdown()
}
}
func (s *Server) requestProcessShutdown() {
select {
case s.requestShutdownChan <- struct{}{}:
default:
}
}
// RequestProcessShutdown returns a channel that is sent to when an authorized
// client requests remote shutdown.
func (s *Server) RequestProcessShutdown() <-chan struct{} {
return s.requestShutdownChan
}
| [] | [] | [] | [] | [] | go | null | null | null |
config/settings/production.py | """
Production Configurations
- Use WhiteNoise for serving static files
- Use Amazon's S3 for storing uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
- Use opbeat for error reporting
"""
import logging
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# opbeat integration
# See https://opbeat.com/languages/django/
INSTALLED_APPS += ['opbeat.contrib.django', ]
OPBEAT = {
'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE = ['opbeat.contrib.django.middleware.OpbeatAPMMiddleware', ] + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
control = 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY, AWS_EXPIRY)
AWS_HEADERS = {
'Cache-Control': bytes(control, encoding='latin-1')
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# COMPRESSOR
# ------------------------------------------------------------------------------
COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
COMPRESS_URL = STATIC_URL
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='coworker <[email protected]>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[coworker]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
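# Illustrative sketch (assumption, not part of the original settings): the
# environment this module reads could be supplied roughly as follows, e.g. via
# Heroku config vars or an .env file; every value below is a placeholder.
#
#   DJANGO_SECRET_KEY=change-me
#   DJANGO_ADMIN_URL=admin/
#   DJANGO_ALLOWED_HOSTS=example.com
#   DATABASE_URL=postgres://user:password@host:5432/dbname
#   REDIS_URL=redis://127.0.0.1:6379
#   DJANGO_AWS_ACCESS_KEY_ID=...
#   DJANGO_AWS_SECRET_ACCESS_KEY=...
#   DJANGO_AWS_STORAGE_BUCKET_NAME=my-bucket
#   DJANGO_MAILGUN_API_KEY=...
#   MAILGUN_SENDER_DOMAIN=mg.example.com
#   DJANGO_SENTRY_DSN=https://[email protected]/1
#   DJANGO_OPBEAT_ORGANIZATION_ID=...
#   DJANGO_OPBEAT_APP_ID=...
#   DJANGO_OPBEAT_SECRET_TOKEN=...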
| [] | [] | [] | [] | [] | python | 0 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"Register": (
"https://circuitpython.readthedocs.io/projects/register/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit ServoKit Library"
copyright = "2018 Kattni Rembor"
author = "Kattni Rembor"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitServokitLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitServoKitLibrary.tex",
"AdafruitServoKit Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitServoKitlibrary",
"Adafruit ServoKit Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitServoKitLibrary",
"Adafruit ServoKit Library Documentation",
author,
"AdafruitServoKitLibrary",
"One line description of project.",
"Miscellaneous",
),
]
| [] | [] | [
"READTHEDOCS"
] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
classh/wsgi.py | """
WSGI config for classh project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'classh.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/test/java/MpesaIntegrationTest.java | import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import tech.bytespot.mpesa_api.configurations.MpesaConfiguration;
import tech.bytespot.mpesa_api.configurations.MpesaUtils;
import tech.bytespot.mpesa_api.user.MpesaService;
import tech.bytespot.mpesa_api.utils.MpesaConstants;
import tech.bytespot.mpesa_api.utils.MpesaException;
import tech.bytespot.mpesa_api.utils.core.Commands;
import tech.bytespot.mpesa_api.utils.core.Identifiers;
import tech.bytespot.mpesa_api.wrappers.user.AccountBalanceRequest;
import tech.bytespot.mpesa_api.wrappers.user.B2B_Request;
import tech.bytespot.mpesa_api.wrappers.user.B2C_Request;
import tech.bytespot.mpesa_api.wrappers.user.C2B_RegisterRequest;
import tech.bytespot.mpesa_api.wrappers.user.C2B_SimulateRequest;
import tech.bytespot.mpesa_api.wrappers.user.Reversal_Request;
import tech.bytespot.mpesa_api.wrappers.user.STKPush_Request;
import tech.bytespot.mpesa_api.wrappers.user.TransactionStatusRequest;
public class MpesaIntegrationTest {
private final static Logger LOGGER = Logger.getLogger(MpesaIntegrationTest.class.getName());
MpesaService mpesaService = new MpesaService();
String appKey = System.getenv("TEST_APPKEY");
String appSecret = System.getenv("TEST_APPSECRET");
String testMsisdn = System.getenv("TEST_PHONE_NO");
String teststkTransactionId = "ws_CO_260420202021051947";
String testStkAmount = "1";
String testB2CAmount = "10";
String testB2BAmount = "10";
String testAccountRef = "ACCOUNT123456789";
String testTillNo = "123456";
String testTransactionId = "ODQ41HBU28";
String testComment = "TEST COMMENT";
String testOccasion = "TEST OCCASION";
String baseUrl = System.getenv("TEST_BASE_URL");
String initiatorName = System.getenv("TEST_INITIATOR_NAME");
String initiatorPassword = System.getenv("TEST_INITIATOR_PASSWORD");
String securityCredential = System.getenv("TEST_SECURITY_CREDENTIAL");
String passKey = System.getenv("TEST_PASSKEY");
String token = "GDs2vGAglCInBKKazV9DxqRoFAKA";
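  // Pay-in configuration: STK push, C2B validation/confirmation URLs,
  // reversals, balance checks and transaction status checks.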
MpesaConfiguration payinMpesaConfiguration = MpesaConfiguration.builder()
.shortcode("601468")
.shortcode2("600000")
.inAppMode(MpesaUtils.Test_Mode)
.withAppKey(appKey)
.withAppSecret(appSecret)
.withTestMsisdn("254708374149")
.setAccessToken(token)
.enableSTK("174379",
passKey,
baseUrl + "stk")
.enableC2B(baseUrl + "validation",
baseUrl + "confirmation")
.enableReversal(baseUrl + "reversal",
baseUrl + "reversal",
initiatorName,
initiatorPassword,
securityCredential
).enableBalanceCheck(baseUrl + "balance",
baseUrl + "balance",
initiatorName,
initiatorPassword,
securityCredential
).enableStatusCheck(baseUrl + "status",
baseUrl + "status",
initiatorName,
initiatorPassword,
securityCredential
).setHttpTimeouts(15, 15, TimeUnit.SECONDS)
.build();
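  // Pay-out configuration: B2C and B2B transfers, plus balance checks
  // and transaction status checks.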
MpesaConfiguration payOutMpesaConfiguration = MpesaConfiguration.builder()
.shortcode("601468")
.shortcode2("600000")
.inAppMode(MpesaUtils.Test_Mode)
.withAppKey(appKey)
.withAppSecret(appSecret)
.withTestMsisdn("254708374149")
.enableB2C(baseUrl + "b2c",
baseUrl + "b2c",
initiatorName,
initiatorPassword,
securityCredential
)
.enableB2B(baseUrl + "b2b",
baseUrl + "b2b",
initiatorName,
initiatorPassword,
securityCredential
).enableBalanceCheck(baseUrl + "balance",
baseUrl + "balance",
initiatorName,
initiatorPassword,
securityCredential
).enableStatusCheck(baseUrl + "status",
baseUrl + "status",
initiatorName,
initiatorPassword,
securityCredential
).setHttpTimeouts(15, 15, TimeUnit.SECONDS)
.build();
@Test
void stkPushRequest() throws MpesaException {
STKPush_Request request = STKPush_Request.builder()
.forPhoneNumber(testMsisdn)
.withAmount(testStkAmount)
.withDescription("test STK push")
.withAccountReference(testAccountRef)
.build();
var response = mpesaService.stkPushRequest(request, payinMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void stkPushQueryRequest() throws MpesaException {
var response = mpesaService.stkQueryRequest(teststkTransactionId, payinMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void c2bRegistrationRequest() throws MpesaException {
C2B_RegisterRequest request = C2B_RegisterRequest.builder()
.withResponseType(MpesaConstants.ResponseType_Completed)
.build();
var response = mpesaService.c2bRegisterRequest(request, payinMpesaConfiguration);
Assertions.assertTrue(response.getResponseDescription().equals("success"),
response.getResponseDescription());
}
@Test
void c2bSimulation() throws MpesaException {
C2B_SimulateRequest request = C2B_SimulateRequest.builder()
.forPhoneNumber(testMsisdn)
.withAmount(testB2BAmount)
.withReferenceNo(testAccountRef)
.withCommandId(Commands.CustomerPayBillOnline)
.build();
var response = mpesaService.c2bSimulateRequest(request, payinMpesaConfiguration);
Assertions.assertTrue(response.getResponseDescription().startsWith("Accept"),
response.getResponseDescription());
}
@Test
void reversalRequest() throws MpesaException {
Reversal_Request request = Reversal_Request.builder()
.forTransactionId(testTransactionId)
.withAmount(testB2BAmount)
.withComment(testComment)
.withOccasion(testOccasion)
.build();
var response = mpesaService.reversalRequest(request, payinMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void transactionStatusRequest() throws MpesaException {
TransactionStatusRequest request = TransactionStatusRequest.builder()
.forTransactionId(testTransactionId)
.useIdentifierType(String.valueOf(Identifiers.SHORTCODE))
.withComment(testComment)
.withOccasion(testOccasion)
.build();
var response = mpesaService.transactionStatusRequest(request, payinMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void accountBalanceRequest() throws MpesaException {
AccountBalanceRequest request = AccountBalanceRequest.builder()
.withComment(testComment)
.withIdentifierType(Identifiers.SHORTCODE)
.build();
var response = mpesaService.accountBalanceRequest(request, payinMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void b2cRequest() throws MpesaException {
B2C_Request request = B2C_Request.builder()
.forPhoneNumber(testMsisdn)
.withAmount(testB2CAmount)
.withCommandId(Commands.PromotionPayment)
.withOccasion(testOccasion)
.withComment(testComment)
.build();
var response = mpesaService.b2cRequest(request, payOutMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
@Test
void b2bRequest() throws MpesaException {
B2B_Request request = B2B_Request.builder()
.sendToTillNo(testTillNo)
.setReceiverIdentifierType(Identifiers.SHORTCODE)
.setSenderIdentifierType(Identifiers.SHORTCODE)
.withAmount(testB2BAmount)
.withReferenceNo(testAccountRef)
.withAmount(testB2CAmount)
.withCommandId(Commands.BusinessPayBill)
.withComment(testComment)
.build();
payOutMpesaConfiguration.getB2b().setSecurityCredential(null);
var response = mpesaService.b2bRequest(request, payOutMpesaConfiguration);
Assertions
.assertTrue(response.getResponseCode().equals("0"), response.getResponseDescription());
}
}
| [
"\"TEST_APPKEY\"",
"\"TEST_APPSECRET\"",
"\"TEST_PHONE_NO\"",
"\"TEST_BASE_URL\"",
"\"TEST_INITIATOR_NAME\"",
"\"TEST_INITIATOR_PASSWORD\"",
"\"TEST_SECURITY_CREDENTIAL\"",
"\"TEST_PASSKEY\""
] | [] | [
"TEST_PASSKEY",
"TEST_APPKEY",
"TEST_APPSECRET",
"TEST_SECURITY_CREDENTIAL",
"TEST_INITIATOR_NAME",
"TEST_INITIATOR_PASSWORD",
"TEST_BASE_URL",
"TEST_PHONE_NO"
] | [] | ["TEST_PASSKEY", "TEST_APPKEY", "TEST_APPSECRET", "TEST_SECURITY_CREDENTIAL", "TEST_INITIATOR_NAME", "TEST_INITIATOR_PASSWORD", "TEST_BASE_URL", "TEST_PHONE_NO"] | java | 8 | 0 | |
app/main/views.py | import datetime
import json
import os
import re
import requests
import time
from datetime import datetime
import csv
import praw
from flask import Blueprint, render_template, send_file
from flask import (
flash,
request,
)
from praw.models import MoreComments
from sqlalchemy.exc import InvalidRequestError
from app import db
from app.main.forms import ScrapeSubmissionForm, ShowCommentsForm
from app.models import EditableHTML, Submission, Comment
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('main/index.html')
@main.route('/about')
def about():
editable_html_obj = EditableHTML.get_editable_html('about')
return render_template(
'main/about.html', editable_html_obj=editable_html_obj)
@main.route('/scrape', methods=['GET', 'POST'])
def scrape():
form = ScrapeSubmissionForm()
if form.validate_on_submit():
id = form.submission_id.data
num_comments = scrape_reddit(id)
flash('{} comments successfully added for the submission'.format(num_comments),
'form-success')
return render_template('main/scrape.html', form=form)
@main.route('/csv', methods=['POST'])
def csv_writing():
id = request.form["id"]
file = get_or_create_csv(id)
return send_file(os.path.join("static", "csv", id + ".csv"))
def get_or_create_csv(id):
file_path = os.path.join("app", "static", "csv", id + ".csv")
if os.path.isfile(file_path):
return file_path
else:
tl_comments = Comment.query.filter_by(parent_id=id).order_by(Comment.created_utc.asc())
with open(file_path, 'w', newline='', encoding='utf-8') as file:
f = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC, delimiter='|')
f.writerow(['ID', 'author', 'body', 'parent_id', 'permalink', 'created_utc', 'score'])
write_csv(f, tl_comments)
return file_path
def write_csv(f, comments):
for comment in comments:
f.writerow([
comment.id, comment.author, comment.get_unescaped_body_no_newlines(), comment.parent_id, comment.permalink,
comment.created_utc.strftime("%m/%d/%Y, %H:%M:%S"), comment.score
])
if comment.get_children_count() > 0:
write_csv(f, comment.get_children())
@main.route('/txt', methods=['POST'])
def txt():
id = request.form["id"]
file = get_or_create_txt(id)
return send_file(os.path.join("static", "txt", id + ".txt"))
def get_or_create_txt(id):
file_path = os.path.join("app", "static", "txt", id + ".txt")
if os.path.isfile(file_path):
return file_path
else:
tl_comments = Comment.query.filter_by(parent_id=id).order_by(Comment.created_utc.asc())
f = open(file_path, "w", encoding="utf-8")
write(f, tl_comments)
f.close()
return file_path
def write(f, comments, level=0):
for comment in comments:
line =""
for i in range(0, level):
line = line + "\t"
line = line + "Author: " + comment.author + ", Commented on: " + comment.created_utc.strftime("%m/%d/%Y, %H:%M:%S") + "\n"
f.write(line)
line = ""
for i in range(0, level):
line = line + "\t"
line = line + comment.get_unescaped_body() + "\n\n"
f.write(line)
if comment.get_children_count() > 0:
write(f, comment.get_children(), level + 1)
@main.route('/read', methods=['GET', 'POST'], defaults={"page": 1})
@main.route('/read/<int:page>', methods=['GET', 'POST'])
def read(id=None, page=1):
per_page = 25
form = ShowCommentsForm()
if form.validate_on_submit():
id = form.submission_id.data.id
tl_comments = Comment.query.filter_by(parent_id=id).order_by(Comment.created_utc.desc())\
.paginate(page, per_page, error_out=False) # Get top level comments
return render_template('main/read.html', form=form, tl_comments=tl_comments, page=1, id=id)
id = request.args.get("id")
if id is not None:
tl_comments = Comment.query.filter_by(parent_id=id).order_by(Comment.created_utc.desc()) \
.paginate(page, per_page, error_out=False) # Get top level comments
return render_template('main/read.html', form=form, tl_comments=tl_comments, page=page, id=id)
return render_template('main/read.html', form=form)
def scrape_reddit(id):
user_agent = ("RScraper")
r = praw.Reddit(user_agent=user_agent, client_id=os.getenv('REDDIT_CLIENT_ID'),
client_secret=os.getenv('REDDIT_SECRET'),
                    redirect_uri=os.getenv('REDIRECT_URI'))
submission = r.submission(id=id)
if not submission.author:
name = '[deleted]'
else:
name = submission.author.name
if Submission.query.filter_by(id=id).count() == 0:
sub = Submission(
id=id,
author=name,
created_utc=datetime.fromtimestamp(submission.created_utc),
score=submission.score,
subreddit=submission.subreddit.name
)
db.session.add(sub)
db.session.commit()
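    # Walk the comment tree breadth-first; MoreComments placeholders are
    # expanded in place so every nested reply gets persisted.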
comment_queue = submission.comments[:]
while comment_queue:
comment = comment_queue.pop(0)
if isinstance(comment, MoreComments):
comment_queue.extend(comment.comments())
else:
submit_comment(comment)
comment_queue.extend(comment.replies)
return submission.num_comments
def sanitize_link_id(link_id):
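    # Reddit "fullname" ids carry a type prefix such as "t1_" or "t3_";
    # strip it so only the bare id is stored.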
if len(re.findall("^t[0-9]_", link_id)) == 0:
return link_id
else:
return link_id.split("_")[1]
def submit_comment(comment):
if Comment.query.filter_by(id=comment.id).count() != 0:
return True
if not comment.author:
name = '[deleted]'
else:
name = comment.author.name
link_id = sanitize_link_id(comment.link_id)
parent_id = sanitize_link_id(comment.parent_id)
com = Comment(
id=comment.id,
author=name,
body=comment.body,
parent_id=parent_id,
permalink=comment.permalink,
created_utc=datetime.fromtimestamp(comment.created_utc),
score=comment.score,
link_id=link_id
)
db.session.add(com)
try:
db.session.commit()
return True
except InvalidRequestError:
return False
#Using pushshift instead of reddit API
'''
def make_request(uri, max_retries=5):
def fire_away(uri):
response = requests.get(uri)
assert response.status_code == 200
return json.loads(response.content)
current_tries = 1
while current_tries < max_retries:
try:
time.sleep(1)
response = fire_away(uri)
return response
except:
time.sleep(1)
current_tries += 1
return fire_away(uri)
def pull_comments_for(id, limit=20000):
uri_template = r'https://api.pushshift.io/reddit/comment/search/?link_id={}&limit={}'
return make_request(uri_template.format(id, limit))
def pull_submission_data(id, limit=1):
uri_template = r'https://api.pushshift.io/reddit/submission/search/?ids={}&limit={}'
return make_request(uri_template.format(id, limit))
def scrape_and_commit(id):
if Submission.query.filter_by(id=id).count() == 0:
submission_json = pull_submission_data(id)["data"][0]
submission = Submission(
id=submission_json["id"],
author=submission_json["author"],
created_utc=datetime.datetime.fromtimestamp(submission_json["created_utc"]),
domain=submission_json["domain"],
score=submission_json["score"],
subreddit=submission_json["subreddit"]
)
db.session.add(submission)
db.session.commit()
comments_json = pull_comments_for(id)
for comment_json in comments_json["data"]:
if Comment.query.filter_by(id=comment_json["id"]).count() == 0:
link_id = sanitize_link_id(comment_json["link_id"])
comment = Comment(
id=comment_json["id"],
author=comment_json["author"],
body=comment_json["body"],
parent_id=comment_json["parent_id"],
permalink=comment_json["permalink"],
created_utc=datetime.datetime.fromtimestamp(comment_json["created_utc"]),
score=comment_json["score"],
link_id=link_id
)
db.session.add(comment)
db.session.commit()
return len(comments_json["data"])
'''
| [] | [] | [
"REDIRECT_URI",
"REDDIT_CLIENT_ID",
"REDDIT_SECRET"
] | [] | ["REDIRECT_URI", "REDDIT_CLIENT_ID", "REDDIT_SECRET"] | python | 3 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alpha.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
crud_google_spreadsheet/function.py | import os
import requests
from datetime import datetime
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# You can find more crypto assets here: https://messari.io/
CRYPTO = os.getenv('CRYPTO', 'BTC') # If not provided, let's collect Bitcoin by default
JSON_KEYFILE = os.getenv('JSON_KEYFILE') # The file you get after generating Google Spreadsheet API key
SHEET_ID = os.getenv('SHEET_ID') # You can get Sheet ID from a Google Spreadsheet URL
SHEET_NAME = os.getenv('SHEET_NAME') # The name of the sheet where to insert data
SCOPE = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive.file',
'https://www.googleapis.com/auth/drive']
def get_gs_client(json_keyfile, scope):
creds = ServiceAccountCredentials.from_json_keyfile_name(json_keyfile, scope)
client = gspread.authorize(creds)
return client
def collect_crypto(crypto):
url = f'https://data.messari.io/api/v1/assets/{crypto}/metrics'
resp = requests.get(url)
data = resp.json()
price = data['data']['market_data']['price_usd']
date = str(datetime.now())
return [crypto, price, date]
def append_record(gsheet_id, sheet_name, record):
gclient = get_gs_client(JSON_KEYFILE, SCOPE)
advisers_sheet = gclient.open_by_key(gsheet_id)
# Find more methods here: https://gspread.readthedocs.io/en/latest/api.html
advisers_sheet.worksheet(sheet_name).append_row(record, 'USER_ENTERED')
def main():
print('Start executing...')
crypto_data = collect_crypto(CRYPTO)
print(f'{CRYPTO} price: {crypto_data[1]}')
append_record(SHEET_ID, SHEET_NAME, crypto_data)
print('Finish executing.')
if __name__ == '__main__':
main()
| [] | [] | [
"JSON_KEYFILE",
"SHEET_NAME",
"CRYPTO",
"SHEET_ID"
] | [] | ["JSON_KEYFILE", "SHEET_NAME", "CRYPTO", "SHEET_ID"] | python | 4 | 0 | |
neon/util/files.go | package util
import (
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/mattn/go-zglob"
)
const (
// FileMode is default file mode
FileMode = 0644
// DirFileMode is default directory file mode
DirFileMode = 0755
)
// ReadFile reads given file and return it as a byte slice:
// - file: the file to read
// Return:
// - content as a slice of bytes
// - an error if something went wrong
func ReadFile(file string) ([]byte, error) {
path := ExpandUserHome(file)
bytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("reading file '%s': %v", file, err)
}
return bytes, nil
}
// FileExists tells if given file exists:
// - file: the name of the file to test
// Return: a boolean that tells if file exists
func FileExists(file string) bool {
file = ExpandUserHome(file)
if stat, err := os.Stat(file); err == nil && !stat.IsDir() {
return true
}
return false
}
// DirExists tells if directory exists:
// - dir: directory to test
// Return: a boolean that tells if directory exists
func DirExists(dir string) bool {
dir = ExpandUserHome(dir)
if stat, err := os.Stat(dir); err == nil && stat.IsDir() {
return true
}
return false
}
// FileIsExecutable tells if given file is executable by user:
// - file: file to test
// Return: a boolean that tells if file is executable by user
func FileIsExecutable(file string) bool {
if stat, err := os.Stat(file); err == nil && stat.Mode()&0111 != 0 {
return true
}
return false
}
// CopyFile copies source file to destination, preserving mode:
// - source: the source file
// - dest: the destination file
// Return: error if something went wrong
func CopyFile(source, dest string) error {
source = ExpandUserHome(source)
dest = ExpandUserHome(dest)
from, err := os.Open(source)
if err != nil {
return fmt.Errorf("opening source file '%s': %v", source, err)
}
info, err := from.Stat()
if err != nil {
return fmt.Errorf("getting mode of source file '%s': %v", source, err)
}
defer from.Close()
to, err := os.Create(dest)
if err != nil {
return fmt.Errorf("creating desctination file '%s': %v", dest, err)
}
defer to.Close()
_, err = io.Copy(to, from)
if err != nil {
return fmt.Errorf("copying file: %v", err)
}
err = to.Sync()
if err != nil {
return fmt.Errorf("syncing destination file: %v", err)
}
if !Windows() {
err = to.Chmod(info.Mode())
if err != nil {
return fmt.Errorf("changing mode of destination file '%s': %v", dest, err)
}
}
return nil
}
// CopyFilesToDir copies files in root directory to destination directory:
// - dir: root directory
// - files: globs of source files
// - toDir: destination directory
// - flatten: tells if files should be flatten in destination directory
// Return: an error if something went wrong
func CopyFilesToDir(dir string, files []string, toDir string, flatten bool) error {
if stat, err := os.Stat(toDir); err != nil || !stat.IsDir() {
return fmt.Errorf("destination directory doesn't exist")
}
for _, file := range files {
source := file
if !filepath.IsAbs(file) {
source = filepath.Join(dir, file)
}
var dest string
if flatten || filepath.IsAbs(file) {
base := filepath.Base(file)
dest = filepath.Join(toDir, base)
} else {
dest = filepath.Join(toDir, file)
destDir := filepath.Dir(dest)
if !DirExists(destDir) {
err := os.MkdirAll(destDir, DirFileMode)
if err != nil {
return fmt.Errorf("creating directory for destination file: %v", err)
}
}
}
err := CopyFile(source, dest)
if err != nil {
return err
}
}
return nil
}
// MoveFilesToDir moves files in source directory to destination:
// - dir: root directory of source files
// - files: globs of files to move
// - toDir: destination directory
// - flatten: tells if files should be flatten in destination directory
// Return: an error if something went wrong
func MoveFilesToDir(dir string, files []string, toDir string, flatten bool) error {
dir = ExpandUserHome(dir)
toDir = ExpandUserHome(toDir)
if stat, err := os.Stat(toDir); err != nil || !stat.IsDir() {
return fmt.Errorf("destination directory doesn't exist")
}
for _, file := range files {
file = ExpandUserHome(file)
source := filepath.Join(dir, file)
var dest string
if flatten {
base := filepath.Base(file)
dest = filepath.Join(toDir, base)
} else {
dest = filepath.Join(toDir, file)
destDir := filepath.Dir(dest)
if !DirExists(destDir) {
err := os.MkdirAll(destDir, DirFileMode)
if err != nil {
return fmt.Errorf("creating directory for destination file: %v", err)
}
}
}
err := os.Rename(source, dest)
if err != nil {
return err
}
}
return nil
}
// ExpandUserHome expand path starting with "~/":
// - path: the path to expand
// Return: expanded path
func ExpandUserHome(path string) string {
if strings.HasPrefix(path, "~/") {
user, _ := user.Current()
home := user.HomeDir
path = filepath.Join(home, path[2:])
}
return path
}
// PathToUnix turns a path to Unix format (with "/"):
// - path: path to turn to unix format
// Return: converted path
func PathToUnix(path string) string {
// replace path separator \ with /
path = strings.Replace(path, "\\", "/", -1)
// replace c: with /c
r := regexp.MustCompile("^[A-Za-z]:.*$")
if r.MatchString(path) {
path = "/" + path[0:1] + path[2:]
}
return path
}
// PathToWindows turns a path to Windows format (with "\"):
// - path: path to turn to windows format
// Return: converted path
func PathToWindows(path string) string {
// replace path separator / with \
path = strings.Replace(path, "/", "\\", -1)
// replace /c/ with c:/
r := regexp.MustCompile(`^\\[A-Za-z]\\.*$`)
if r.MatchString(path) {
path = path[1:2] + ":" + path[2:]
}
return path
}
// FindFiles finds files in the context:
// - dir: the search root directory (current dir if empty)
// - includes: the list of globs to include
// - excludes: the list of globs to exclude
// - folder: tells if we should include folders
// Return the list of files as a slice of strings
func FindFiles(dir string, includes, excludes []string, folder bool) ([]string, error) {
var err error
included := joinPath(dir, includes)
excluded := joinPath(dir, excludes)
included, err = filterFolders(included, folder)
if err != nil {
return nil, err
}
files := filterExcluded(included, excluded)
files, err = makeRelative(dir, files)
if err != nil {
return nil, err
}
sort.Strings(files)
return files, nil
}
// FindInPath search given executable in PATH:
// - executable: executable to search.
// Return: list of directories containing executable
func FindInPath(executable string) []string {
path := os.Getenv("PATH")
dirs := strings.Split(path, string(os.PathListSeparator))
var paths []string
for _, dir := range dirs {
file := filepath.Join(dir, executable)
if FileIsExecutable(file) {
paths = append(paths, file)
}
}
return paths
}
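// joinPath resolves each path against dir, leaving absolute paths untouched.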
func joinPath(dir string, paths []string) []string {
var joined []string
for _, path := range paths {
if !filepath.IsAbs(path) {
path = filepath.Join(dir, path)
}
joined = append(joined, path)
}
return joined
}
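// filterFolders expands the include globs and keeps regular files, plus
// directories when folder is true.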
func filterFolders(included []string, folder bool) ([]string, error) {
var candidates []string
for _, include := range included {
list, _ := zglob.Glob(include)
for _, file := range list {
stat, err := os.Stat(file)
if err != nil {
return nil, fmt.Errorf("stating file: %v", err)
}
if stat.Mode().IsRegular() ||
(stat.Mode().IsDir() && folder) {
candidates = append(candidates, file)
}
}
}
return candidates, nil
}
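// filterExcluded drops candidates that match any of the excluded globs.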
func filterExcluded(candidates []string, excluded []string) []string {
var files []string
if excluded != nil {
for index, file := range candidates {
for _, exclude := range excluded {
match, err := zglob.Match(exclude, file)
if match || err != nil {
candidates[index] = ""
}
}
}
for _, file := range candidates {
if file != "" {
files = append(files, file)
}
}
} else {
files = candidates
}
return files
}
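// makeRelative rewrites each file path relative to dir when dir is not empty.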
func makeRelative(dir string, files []string) ([]string, error) {
var err error
for index, file := range files {
if dir != "" {
files[index], err = filepath.Rel(dir, file)
if err != nil {
return nil, err
}
}
}
return files, nil
}
| [
"\"PATH\""
] | [] | [
"PATH"
] | [] | ["PATH"] | go | 1 | 0 | |
cmd/ipwswatch/main.go | package main
import (
"context"
"flag"
"log"
"os"
"os/signal"
"path/filepath"
"syscall"
commands "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
coreapi "github.com/ipfs/go-ipfs/core/coreapi"
corehttp "github.com/ipfs/go-ipfs/core/corehttp"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
fsnotify "github.com/fsnotify/fsnotify"
config "github.com/ipweb-group/go-ipws-config"
files "github.com/ipfs/go-ipfs-files"
process "github.com/jbenet/goprocess"
homedir "github.com/mitchellh/go-homedir"
)
var http = flag.Bool("http", false, "expose IPWS HTTP API")
var repoPath = flag.String("repo", os.Getenv("IPWS_PATH"), "IPWS_PATH to use")
var watchPath = flag.String("path", ".", "the path to watch")
func main() {
flag.Parse()
// precedence
// 1. --repo flag
// 2. IPWS_PATH environment variable
// 3. default repo path
var ipfsPath string
if *repoPath != "" {
ipfsPath = *repoPath
} else {
var err error
ipfsPath, err = fsrepo.BestKnownPath()
if err != nil {
log.Fatal(err)
}
}
if err := run(ipfsPath, *watchPath); err != nil {
log.Fatal(err)
}
}
func run(ipfsPath, watchPath string) error {
proc := process.WithParent(process.Background())
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
ipfsPath, err := homedir.Expand(ipfsPath)
if err != nil {
return err
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer watcher.Close()
if err := addTree(watcher, watchPath); err != nil {
return err
}
r, err := fsrepo.Open(ipfsPath)
if err != nil {
// TODO handle case: daemon running
// TODO handle case: repo doesn't exist or isn't initialized
return err
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{
Online: true,
Repo: r,
})
if err != nil {
return err
}
defer node.Close()
api, err := coreapi.NewCoreAPI(node)
if err != nil {
return err
}
if *http {
addr := "/ip4/127.0.0.1/tcp/5001"
var opts = []corehttp.ServeOption{
corehttp.GatewayOption(true, "/ipfs", "/ipns"),
corehttp.WebUIOption,
corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
}
proc.Go(func(p process.Process) {
if err := corehttp.ListenAndServe(node, addr, opts...); err != nil {
return
}
})
}
interrupts := make(chan os.Signal, 1)
signal.Notify(interrupts, os.Interrupt, syscall.SIGTERM)
for {
select {
case <-interrupts:
return nil
case e := <-watcher.Events:
log.Printf("received event: %s", e)
isDir, err := IsDirectory(e.Name)
if err != nil {
continue
}
switch e.Op {
case fsnotify.Remove:
if isDir {
if err := watcher.Remove(e.Name); err != nil {
return err
}
}
default:
// all events except for Remove result in an IPFS.Add, but only
// directory creation triggers a new watch
switch e.Op {
case fsnotify.Create:
if isDir {
if err := addTree(watcher, e.Name); err != nil {
return err
}
}
}
proc.Go(func(p process.Process) {
file, err := os.Open(e.Name)
if err != nil {
log.Println(err)
return
}
defer file.Close()
st, err := file.Stat()
if err != nil {
log.Println(err)
return
}
f, err := files.NewReaderPathFile(e.Name, file, st)
if err != nil {
log.Println(err)
return
}
k, err := api.Unixfs().Add(node.Context(), f)
if err != nil {
log.Println(err)
}
log.Printf("added %s... key: %s", e.Name, k)
})
}
case err := <-watcher.Errors:
log.Println(err)
}
}
}
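// addTree walks root and registers a watch on every non-hidden directory.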
func addTree(w *fsnotify.Watcher, root string) error {
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
log.Println(err)
return nil
}
isDir, err := IsDirectory(path)
if err != nil {
log.Println(err)
return nil
}
switch {
case isDir && IsHidden(path):
log.Println(path)
return filepath.SkipDir
case isDir:
log.Println(path)
if err := w.Add(path); err != nil {
return err
}
default:
return nil
}
return nil
})
return err
}
func IsDirectory(path string) (bool, error) {
fileInfo, err := os.Stat(path)
return fileInfo.IsDir(), err
}
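// IsHidden reports whether the final element of path starts with a dot.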
func IsHidden(path string) bool {
path = filepath.Base(path)
if path == "." || path == "" {
return false
}
if rune(path[0]) == rune('.') {
return true
}
return false
}
func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context {
return commands.Context{
ConfigRoot: repoPath,
LoadConfig: func(path string) (*config.Config, error) {
return node.Repo.Config()
},
ConstructNode: func() (*core.IpfsNode, error) {
return node, nil
},
}
}
| [
"\"IPWS_PATH\""
] | [] | [
"IPWS_PATH"
] | [] | ["IPWS_PATH"] | go | 1 | 0 | |
syncer/syncer_test.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncer
import (
"context"
"database/sql"
"fmt"
"os"
"strconv"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
. "github.com/pingcap/check"
"github.com/pingcap/dm/pkg/log"
parserpkg "github.com/pingcap/dm/pkg/parser"
"github.com/pingcap/parser/ast"
bf "github.com/pingcap/tidb-tools/pkg/binlog-filter"
cm "github.com/pingcap/tidb-tools/pkg/column-mapping"
"github.com/pingcap/tidb-tools/pkg/filter"
gmysql "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
"github.com/pingcap/dm/dm/config"
"github.com/pingcap/dm/pkg/binlog/event"
"github.com/pingcap/dm/pkg/utils"
)
var _ = Suite(&testSyncerSuite{})
func TestSuite(t *testing.T) {
TestingT(t)
}
type testSyncerSuite struct {
db *sql.DB
syncer *replication.BinlogSyncer
streamer *replication.BinlogStreamer
cfg *config.SubTaskConfig
}
func (s *testSyncerSuite) SetUpSuite(c *C) {
host := os.Getenv("MYSQL_HOST")
if host == "" {
host = "127.0.0.1"
}
port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT"))
if port == 0 {
port = 3306
}
user := os.Getenv("MYSQL_USER")
if user == "" {
user = "root"
}
pswd := os.Getenv("MYSQL_PSWD")
s.cfg = &config.SubTaskConfig{
From: config.DBConfig{
Host: host,
User: user,
Password: pswd,
Port: port,
},
To: config.DBConfig{
Host: host,
User: user,
Password: pswd,
Port: port,
},
ServerID: 101,
MetaSchema: "test",
}
s.cfg.From.Adjust()
s.cfg.To.Adjust()
var err error
dbAddr := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8", s.cfg.From.User, s.cfg.From.Password, s.cfg.From.Host, s.cfg.From.Port)
s.db, err = sql.Open("mysql", dbAddr)
if err != nil {
log.Fatal(err)
}
s.resetBinlogSyncer()
_, err = s.db.Exec("SET GLOBAL binlog_format = 'ROW';")
c.Assert(err, IsNil)
}
func (s *testSyncerSuite) resetBinlogSyncer() {
var err error
cfg := replication.BinlogSyncerConfig{
ServerID: uint32(s.cfg.ServerID),
Flavor: "mysql",
Host: s.cfg.From.Host,
Port: uint16(s.cfg.From.Port),
User: s.cfg.From.User,
Password: s.cfg.From.Password,
UseDecimal: true,
VerifyChecksum: true,
}
if s.cfg.Timezone != "" {
timezone, err2 := time.LoadLocation(s.cfg.Timezone)
		if err2 != nil {
log.Fatal(err2)
}
cfg.TimestampStringLocation = timezone
}
pos := gmysql.Position{Name: "", Pos: 4}
if s.syncer != nil {
s.syncer.Close()
pos = s.syncer.GetNextPosition()
} else {
s.resetMaster()
}
s.syncer = replication.NewBinlogSyncer(cfg)
s.streamer, err = s.syncer.StartSync(pos)
if err != nil {
log.Fatal(err)
}
}
func (s *testSyncerSuite) TearDownSuite(c *C) {
s.db.Close()
}
func (s *testSyncerSuite) resetMaster() {
s.db.Exec("reset master")
}
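// catchUpBinlog drains pending binlog events until the stream has been idle
// for a short while, so the next test case starts from a quiet position.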
func (s *testSyncerSuite) catchUpBinlog() {
ch := make(chan interface{})
ctx, cancel := context.WithCancel(context.Background())
go func() {
for {
ev, _ := s.streamer.GetEvent(ctx)
if ev == nil {
return
}
ch <- struct{}{}
}
}()
for {
select {
case <-ch:
// do nothing
case <-time.After(10 * time.Millisecond):
cancel()
return
}
}
}
func (s *testSyncerSuite) TestSelectDB(c *C) {
s.cfg.BWList = &filter.Rules{
DoDBs: []string{"~^b.*", "s1", "stest"},
}
schemas := [][]byte{[]byte("s1"), []byte("s2"), []byte("btest"), []byte("b1"), []byte("stest"), []byte("st")}
skips := []bool{false, true, false, false, false, true}
type Case struct {
schema []byte
query []byte
skip bool
}
cases := make([]Case, 0, 2*len(schemas))
for i, schema := range schemas {
cases = append(cases, Case{
schema: schema,
query: append([]byte("create database "), schema...),
skip: skips[i]})
cases = append(cases, Case{
schema: schema,
query: append([]byte("drop database "), schema...),
skip: skips[i]})
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
syncer := NewSyncer(s.cfg)
err = syncer.genRouter()
c.Assert(err, IsNil)
header := &replication.EventHeader{
Timestamp: uint32(time.Now().Unix()),
ServerID: 101,
Flags: 0x01,
}
for _, cs := range cases {
e, err := event.GenQueryEvent(header, 123, 0, 0, 0, nil, cs.schema, cs.query)
c.Assert(err, IsNil)
c.Assert(e, NotNil)
ev, ok := e.Event.(*replication.QueryEvent)
c.Assert(ok, IsTrue)
query := string(ev.Query)
stmt, err := p.ParseOneStmt(query, "", "")
c.Assert(err, IsNil)
tableNames, err := parserpkg.FetchDDLTableNames(string(ev.Schema), stmt)
c.Assert(err, IsNil)
r, err := syncer.skipQuery(tableNames, stmt, query)
c.Assert(err, IsNil)
c.Assert(r, Equals, cs.skip)
}
}
func (s *testSyncerSuite) TestSelectTable(c *C) {
s.cfg.BWList = &filter.Rules{
DoDBs: []string{"t2", "stest", "~^ptest*"},
DoTables: []*filter.Table{
{Schema: "stest", Name: "log"},
{Schema: "stest", Name: "~^t.*"},
{Schema: "~^ptest*", Name: "~^t.*"},
},
}
sqls := []string{
"create database s1",
"create table s1.log(id int)",
"drop database s1",
"create table mysql.test(id int)",
"drop table mysql.test",
"create database stest",
"create table stest.log(id int)",
"create table stest.t(id int)",
"create table stest.log2(id int)",
"insert into stest.t(id) values (10)",
"insert into stest.log(id) values (10)",
"insert into stest.log2(id) values (10)",
"drop table stest.log,stest.t,stest.log2",
"drop database stest",
"create database t2",
"create table t2.log(id int)",
"create table t2.log1(id int)",
"drop table t2.log",
"drop database t2",
"create database ptest1",
"create table ptest1.t1(id int)",
"drop database ptest1",
}
res := [][]bool{
{true},
{true},
{true},
{true},
{true},
{false},
{false},
{false},
{true},
{false},
{false},
{true},
{false, false, true},
{false},
{false},
{true},
{true},
{true},
{false},
{false},
{false},
{false},
}
for _, sql := range sqls {
s.db.Exec(sql)
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
syncer := NewSyncer(s.cfg)
syncer.genRouter()
var i int
for {
if i == len(sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.QueryEvent:
query := string(ev.Query)
result, err := syncer.parseDDLSQL(query, p, string(ev.Schema))
c.Assert(err, IsNil)
if !result.isDDL {
continue // BEGIN event
}
querys, _, err := syncer.resolveDDLSQL(p, result.stmt, string(ev.Schema))
c.Assert(err, IsNil)
if len(querys) == 0 {
continue
}
for j, sql := range querys {
stmt, err := p.ParseOneStmt(sql, "", "")
c.Assert(err, IsNil)
tableNames, err := parserpkg.FetchDDLTableNames(string(ev.Schema), stmt)
c.Assert(err, IsNil)
r, err := syncer.skipQuery(tableNames, stmt, sql)
c.Assert(err, IsNil)
c.Assert(r, Equals, res[i][j])
}
case *replication.RowsEvent:
r, err := syncer.skipDMLEvent(string(ev.Table.Schema), string(ev.Table.Table), e.Header.EventType)
c.Assert(err, IsNil)
c.Assert(r, Equals, res[i][0])
default:
continue
}
i++
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestIgnoreDB(c *C) {
s.cfg.BWList = &filter.Rules{
IgnoreDBs: []string{"~^b.*", "s1", "stest"},
}
sqls := []string{
"create database s1",
"drop database s1",
"create database s2",
"drop database s2",
"create database btest",
"drop database btest",
"create database b1",
"drop database b1",
"create database stest",
"drop database stest",
"create database st",
"drop database st",
}
res := []bool{true, true, false, false, true, true, true, true, true, true, false, false}
for _, sql := range sqls {
s.db.Exec(sql)
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
syncer := NewSyncer(s.cfg)
syncer.genRouter()
i := 0
for {
if i == len(sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
ev, ok := e.Event.(*replication.QueryEvent)
if !ok {
continue
}
sql := string(ev.Query)
stmt, err := p.ParseOneStmt(sql, "", "")
c.Assert(err, IsNil)
tableNames, err := parserpkg.FetchDDLTableNames(sql, stmt)
c.Assert(err, IsNil)
r, err := syncer.skipQuery(tableNames, stmt, sql)
c.Assert(err, IsNil)
c.Assert(r, Equals, res[i])
i++
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestIgnoreTable(c *C) {
s.cfg.BWList = &filter.Rules{
IgnoreDBs: []string{"t2"},
IgnoreTables: []*filter.Table{
{Schema: "stest", Name: "log"},
{Schema: "stest", Name: "~^t.*"},
},
}
sqls := []string{
"create database s1",
"create table s1.log(id int)",
"drop database s1",
"create table mysql.test(id int)",
"drop table mysql.test",
"create database stest",
"create table stest.log(id int)",
"create table stest.t(id int)",
"create table stest.log2(id int)",
"insert into stest.t(id) values (10)",
"insert into stest.log(id) values (10)",
"insert into stest.log2(id) values (10)",
"drop table stest.log,stest.t,stest.log2",
"drop database stest",
"create database t2",
"create table t2.log(id int)",
"create table t2.log1(id int)",
"drop table t2.log",
"drop database t2",
}
res := [][]bool{
{false},
{false},
{false},
{true},
{true},
{false},
{true},
{true},
{false},
{true},
{true},
{false},
{true, true, false},
{false},
{true},
{true},
{true},
{true},
{true},
}
for _, sql := range sqls {
s.db.Exec(sql)
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
syncer := NewSyncer(s.cfg)
syncer.genRouter()
i := 0
for {
if i == len(sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.QueryEvent:
query := string(ev.Query)
result, err := syncer.parseDDLSQL(query, p, string(ev.Schema))
c.Assert(err, IsNil)
if !result.isDDL {
continue // BEGIN event
}
querys, _, err := syncer.resolveDDLSQL(p, result.stmt, string(ev.Schema))
c.Assert(err, IsNil)
if len(querys) == 0 {
continue
}
for j, sql := range querys {
stmt, err := p.ParseOneStmt(sql, "", "")
c.Assert(err, IsNil)
tableNames, err := parserpkg.FetchDDLTableNames(string(ev.Schema), stmt)
c.Assert(err, IsNil)
r, err := syncer.skipQuery(tableNames, stmt, sql)
c.Assert(err, IsNil)
c.Assert(r, Equals, res[i][j])
}
case *replication.RowsEvent:
r, err := syncer.skipDMLEvent(string(ev.Table.Schema), string(ev.Table.Table), e.Header.EventType)
c.Assert(err, IsNil)
c.Assert(r, Equals, res[i][0])
default:
continue
}
i++
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestSkipDML(c *C) {
s.cfg.FilterRules = []*bf.BinlogEventRule{
{
SchemaPattern: "*",
TablePattern: "",
Events: []bf.EventType{bf.UpdateEvent},
Action: bf.Ignore,
}, {
SchemaPattern: "foo",
TablePattern: "",
Events: []bf.EventType{bf.DeleteEvent},
Action: bf.Ignore,
}, {
SchemaPattern: "foo1",
TablePattern: "bar1",
Events: []bf.EventType{bf.DeleteEvent},
Action: bf.Ignore,
},
}
s.cfg.BWList = nil
sqls := []struct {
sql string
isDML bool
skipped bool
}{
{"drop database if exists foo", false, false},
{"create database foo", false, false},
{"create table foo.bar(id int)", false, false},
{"insert into foo.bar values(1)", true, false},
{"update foo.bar set id=2", true, true},
{"delete from foo.bar where id=2", true, true},
{"drop database if exists foo1", false, false},
{"create database foo1", false, false},
{"create table foo1.bar1(id int)", false, false},
{"insert into foo1.bar1 values(1)", true, false},
{"update foo1.bar1 set id=2", true, true},
{"delete from foo1.bar1 where id=2", true, true},
}
for i := range sqls {
s.db.Exec(sqls[i].sql)
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
syncer := NewSyncer(s.cfg)
syncer.genRouter()
syncer.binlogFilter, err = bf.NewBinlogEvent(false, s.cfg.FilterRules)
c.Assert(err, IsNil)
i := 0
for {
if i >= len(sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.QueryEvent:
stmt, err := p.ParseOneStmt(string(ev.Query), "", "")
c.Assert(err, IsNil)
_, isDDL := stmt.(ast.DDLNode)
if !isDDL {
continue
}
case *replication.RowsEvent:
r, err := syncer.skipDMLEvent(string(ev.Table.Schema), string(ev.Table.Table), e.Header.EventType)
c.Assert(err, IsNil)
c.Assert(r, Equals, sqls[i].skipped)
default:
continue
}
i++
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestColumnMapping(c *C) {
rules := []*cm.Rule{
{
PatternSchema: "stest*",
PatternTable: "log*",
TargetColumn: "id",
Expression: cm.AddPrefix,
Arguments: []string{"test:"},
},
{
PatternSchema: "stest*",
PatternTable: "t*",
TargetColumn: "id",
Expression: cm.PartitionID,
Arguments: []string{"1", "stest_", "t_"},
},
}
createTableSQLs := []string{
"create database if not exists stest_3",
"create table if not exists stest_3.log(id varchar(45))",
"create table if not exists stest_3.t_2(name varchar(45), id bigint)",
"create table if not exists stest_3.a(id int)",
}
dmls := []struct {
sql string
column []string
data []interface{}
}{
{"insert into stest_3.t_2(name, id) values (\"ian\", 10)", []string{"name", "id"}, []interface{}{"ian", int64(1<<59 | 3<<52 | 2<<44 | 10)}},
{"insert into stest_3.log(id) values (\"10\")", []string{"id"}, []interface{}{"test:10"}},
{"insert into stest_3.a(id) values (10)", []string{"id"}, []interface{}{int32(10)}},
}
dropTableSQLs := []string{
"drop table stest_3.log,stest_3.t_2,stest_3.a",
"drop database stest_3",
}
for _, sql := range createTableSQLs {
s.db.Exec(sql)
}
for i := range dmls {
s.db.Exec(dmls[i].sql)
}
for _, sql := range dropTableSQLs {
s.db.Exec(sql)
}
p, err := utils.GetParser(s.db, false)
c.Assert(err, IsNil)
mapping, err := cm.NewMapping(false, rules)
c.Assert(err, IsNil)
totalEvent := len(dmls) + len(createTableSQLs) + len(dropTableSQLs)
i := 0
dmlIndex := 0
for {
if i == totalEvent {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.QueryEvent:
stmt, err := p.ParseOneStmt(string(ev.Query), "", "")
c.Assert(err, IsNil)
_, isDDL := stmt.(ast.DDLNode)
if !isDDL {
continue
}
case *replication.RowsEvent:
r, _, err := mapping.HandleRowValue(string(ev.Table.Schema), string(ev.Table.Table), dmls[dmlIndex].column, ev.Rows[0])
c.Assert(err, IsNil)
c.Assert(r, DeepEquals, dmls[dmlIndex].data)
dmlIndex++
default:
continue
}
i++
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestTimezone(c *C) {
s.cfg.BWList = &filter.Rules{
DoDBs: []string{"~^tztest_.*"},
IgnoreDBs: []string{"stest", "~^foo.*"},
}
createSQLs := []string{
"create database if not exists tztest_1",
"create table if not exists tztest_1.t_1(id int, a timestamp)",
}
testCases := []struct {
sqls []string
timezone string
}{
{
[]string{
"insert into tztest_1.t_1(id, a) values (1, '1990-04-15 01:30:12')",
"insert into tztest_1.t_1(id, a) values (2, '1990-04-15 02:30:12')",
"insert into tztest_1.t_1(id, a) values (3, '1990-04-15 03:30:12')",
},
"Asia/Shanghai",
},
{
[]string{
"insert into tztest_1.t_1(id, a) values (4, '1990-04-15 01:30:12')",
"insert into tztest_1.t_1(id, a) values (5, '1990-04-15 02:30:12')",
"insert into tztest_1.t_1(id, a) values (6, '1990-04-15 03:30:12')",
},
"America/Phoenix",
},
}
queryTs := "select unix_timestamp(a) from `tztest_1`.`t_1` where id = ?"
dropSQLs := []string{
"drop table tztest_1.t_1",
"drop database tztest_1",
}
for _, sql := range createSQLs {
s.db.Exec(sql)
}
for _, testCase := range testCases {
s.cfg.Timezone = testCase.timezone
syncer := NewSyncer(s.cfg)
syncer.genRouter()
s.resetBinlogSyncer()
		// we should not use `sql.DB.Exec` to run queries that depend on session variables,
		// because `sql.DB.Exec` picks an underlying Conn from the connection pool for every query
		// and different Conns use different sessions
		// ref: `sql.DB.Conn`
		// `set @@global` is not reasonable either, because it cannot affect sessions that already exist
		// if we must ensure multiple queries use the same session, we should use a transaction
txn, err := s.db.Begin()
c.Assert(err, IsNil)
txn.Exec("set @@session.time_zone = ?", testCase.timezone)
txn.Exec("set @@session.sql_mode = ''")
for _, sql := range testCase.sqls {
_, err = txn.Exec(sql)
c.Assert(err, IsNil)
}
err = txn.Commit()
c.Assert(err, IsNil)
location, err := time.LoadLocation(testCase.timezone)
c.Assert(err, IsNil)
idx := 0
for {
if idx >= len(testCase.sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.RowsEvent:
skip, err := syncer.skipDMLEvent(string(ev.Table.Schema), string(ev.Table.Table), e.Header.EventType)
c.Assert(err, IsNil)
if skip {
continue
}
rowid := ev.Rows[0][0].(int32)
var ts sql.NullInt64
err2 := s.db.QueryRow(queryTs, rowid).Scan(&ts)
c.Assert(err2, IsNil)
c.Assert(ts.Valid, IsTrue)
raw := ev.Rows[0][1].(string)
data, err := time.ParseInLocation("2006-01-02 15:04:05", raw, location)
c.Assert(err, IsNil)
c.Assert(data.Unix(), DeepEquals, ts.Int64)
idx++
default:
continue
}
}
}
for _, sql := range dropSQLs {
s.db.Exec(sql)
}
s.catchUpBinlog()
}
func (s *testSyncerSuite) TestGeneratedColumn(c *C) {
s.cfg.BWList = &filter.Rules{
DoDBs: []string{"~^gctest_.*"},
}
createSQLs := []string{
"create database if not exists gctest_1 DEFAULT CHARSET=utf8mb4",
"create table if not exists gctest_1.t_1(id int, age int, cfg varchar(40), cfg_json json as (cfg) virtual)",
"create table if not exists gctest_1.t_2(id int primary key, age int, cfg varchar(40), cfg_json json as (cfg) virtual)",
"create table if not exists gctest_1.t_3(id int, cfg varchar(40), gen_id int as (cfg->\"$.id\"), unique key gen_id_unique(`gen_id`))",
}
	// if a table has a json typed generated column but no primary key or unique key,
	// update/delete operations will not be replicated successfully because the json field can't
	// be compared with the raw value in the where condition. In this unit test we only check the
	// generated SQL and don't check the data replication to the downstream.
testCases := []struct {
sqls []string
expected []string
args [][]interface{}
}{
{
[]string{
"insert into gctest_1.t_1(id, age, cfg) values (1, 18, '{}')",
"insert into gctest_1.t_1(id, age, cfg) values (2, 19, '{\"key\": \"value\"}')",
"insert into gctest_1.t_1(id, age, cfg) values (3, 17, NULL)",
"insert into gctest_1.t_2(id, age, cfg) values (1, 18, '{}')",
"insert into gctest_1.t_2(id, age, cfg) values (2, 19, '{\"key\": \"value\", \"int\": 123}')",
"insert into gctest_1.t_2(id, age, cfg) values (3, 17, NULL)",
"insert into gctest_1.t_3(id, cfg) values (1, '{\"id\": 1}')",
"insert into gctest_1.t_3(id, cfg) values (2, '{\"id\": 2}')",
"insert into gctest_1.t_3(id, cfg) values (3, '{\"id\": 3}')",
},
[]string{
"REPLACE INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?);",
"REPLACE INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?);",
"REPLACE INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?);",
"REPLACE INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?);",
},
[][]interface{}{
{int32(1), int32(18), "{}"},
{int32(2), int32(19), "{\"key\": \"value\"}"},
{int32(3), int32(17), nil},
{int32(1), int32(18), "{}"},
{int32(2), int32(19), "{\"key\": \"value\", \"int\": 123}"},
{int32(3), int32(17), nil},
{int32(1), "{\"id\": 1}"},
{int32(2), "{\"id\": 2}"},
{int32(3), "{\"id\": 3}"},
},
},
{
[]string{
"update gctest_1.t_1 set cfg = '{\"a\": 12}', age = 21 where id = 1",
"update gctest_1.t_1 set cfg = '{}' where id = 2 and age = 19",
"update gctest_1.t_1 set age = 20 where cfg is NULL",
"update gctest_1.t_2 set cfg = '{\"a\": 12}', age = 21 where id = 1",
"update gctest_1.t_2 set cfg = '{}' where id = 2 and age = 19",
"update gctest_1.t_2 set age = 20 where cfg is NULL",
"update gctest_1.t_3 set cfg = '{\"id\": 11}' where id = 1",
"update gctest_1.t_3 set cfg = '{\"id\": 12, \"old_id\": 2}' where gen_id = 2",
},
[]string{
"UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` IS ? AND `cfg_json` IS ? LIMIT 1;",
"UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_3` SET `id` = ?, `cfg` = ? WHERE `gen_id` = ? LIMIT 1;",
"UPDATE `gctest_1`.`t_3` SET `id` = ?, `cfg` = ? WHERE `gen_id` = ? LIMIT 1;",
},
[][]interface{}{
{int32(1), int32(21), "{\"a\": 12}", int32(1), int32(18), "{}", []uint8("{}")},
{int32(2), int32(19), "{}", int32(2), int32(19), "{\"key\": \"value\"}", []uint8("{\"key\":\"value\"}")},
{int32(3), int32(20), nil, int32(3), int32(17), nil, nil},
{int32(1), int32(21), "{\"a\": 12}", int32(1)},
{int32(2), int32(19), "{}", int32(2)},
{int32(3), int32(20), nil, int32(3)},
{int32(1), "{\"id\": 11}", int32(1)},
{int32(2), "{\"id\": 12, \"old_id\": 2}", int32(2)},
},
},
{
[]string{
"delete from gctest_1.t_1 where id = 1",
"delete from gctest_1.t_1 where id = 2 and age = 19",
"delete from gctest_1.t_1 where cfg is NULL",
"delete from gctest_1.t_2 where id = 1",
"delete from gctest_1.t_2 where id = 2 and age = 19",
"delete from gctest_1.t_2 where cfg is NULL",
"delete from gctest_1.t_3 where id = 1",
"delete from gctest_1.t_3 where gen_id = 12",
},
[]string{
"DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` IS ? AND `cfg_json` IS ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_3` WHERE `gen_id` = ? LIMIT 1;",
"DELETE FROM `gctest_1`.`t_3` WHERE `gen_id` = ? LIMIT 1;",
},
[][]interface{}{
{int32(1), int32(21), "{\"a\": 12}", []uint8("{\"a\":12}")},
{int32(2), int32(19), "{}", []uint8("{}")},
{int32(3), int32(20), nil, nil},
{int32(1)},
{int32(2)},
{int32(3)},
{int32(11)},
{int32(12)},
},
},
}
dropSQLs := []string{
"drop table gctest_1.t_1",
"drop table gctest_1.t_2",
"drop table gctest_1.t_3",
"drop database gctest_1",
}
for _, sql := range createSQLs {
_, err := s.db.Exec(sql)
c.Assert(err, IsNil)
}
syncer := NewSyncer(s.cfg)
syncer.cfg.MaxRetry = 1
// use upstream db as mock downstream
syncer.toDBs = []*Conn{{db: s.db}}
for _, testCase := range testCases {
for _, sql := range testCase.sqls {
_, err := s.db.Exec(sql)
c.Assert(err, IsNil)
}
idx := 0
for {
if idx >= len(testCase.sqls) {
break
}
e, err := s.streamer.GetEvent(context.Background())
c.Assert(err, IsNil)
switch ev := e.Event.(type) {
case *replication.RowsEvent:
table, _, err := syncer.getTable(string(ev.Table.Schema), string(ev.Table.Table))
c.Assert(err, IsNil)
var (
sqls []string
args [][]interface{}
)
prunedColumns, prunedRows, err := pruneGeneratedColumnDML(table.columns, ev.Rows, table.schema, table.name, syncer.genColsCache)
c.Assert(err, IsNil)
param := &genDMLParam{
schema: table.schema,
table: table.name,
data: prunedRows,
originalData: ev.Rows,
columns: prunedColumns,
originalColumns: table.columns,
originalIndexColumns: table.indexColumns,
}
switch e.Header.EventType {
case replication.WRITE_ROWS_EVENTv0, replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2:
sqls, _, args, err = genInsertSQLs(param)
c.Assert(err, IsNil)
c.Assert(sqls[0], Equals, testCase.expected[idx])
c.Assert(args[0], DeepEquals, testCase.args[idx])
case replication.UPDATE_ROWS_EVENTv0, replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
// test with sql_mode = false only
sqls, _, args, err = genUpdateSQLs(param)
c.Assert(err, IsNil)
c.Assert(sqls[0], Equals, testCase.expected[idx])
c.Assert(args[0], DeepEquals, testCase.args[idx])
case replication.DELETE_ROWS_EVENTv0, replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2:
sqls, _, args, err = genDeleteSQLs(param)
c.Assert(err, IsNil)
c.Assert(sqls[0], Equals, testCase.expected[idx])
c.Assert(args[0], DeepEquals, testCase.args[idx])
}
idx++
default:
continue
}
}
}
for _, sql := range dropSQLs {
s.db.Exec(sql)
}
s.catchUpBinlog()
}
| [
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_USER\"",
"\"MYSQL_PSWD\""
] | [] | [
"MYSQL_PORT",
"MYSQL_USER",
"MYSQL_PSWD",
"MYSQL_HOST"
] | [] | ["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"] | go | 4 | 0 | |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
)
import _ "github.com/go-sql-driver/mysql"
import "github.com/go-martini/martini"
import "github.com/martini-contrib/render"
type ApiSession struct {
U User `json:"user"`
S []Session `json:"sessions"`
Status string `json:"status"`
}
type ApiUsers struct {
U []User `json:"users"`
Status string `json:"status"`
}
type PostData struct {
Heart int `json:"heart-score"`
Duration int `json:"watch-time"`
Show string `json:"show"`
Title string `json:"title"`
User string `json:"user"`
}
type indexPageData struct {
Fill []userPageData
}
type programPageData struct {
Name string
AvgRating int
AvgDuration int
Heart []int
Fill []userPageData
}
type userPageData struct {
User string
Title string
Show string
Heart []int
Duration int
Rating int
Pid int
}
type User struct {
Id int
Name string
}
type Session struct {
Title string
Show string
Heart int
Duration int
}
type SessionP struct {
Title string
Show string
Heart int
Duration int
User string
}
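// setup_db connects to the local MySQL server, recreates the HeartRating
// database from scratch, and creates the Users, Programs and Sessions tables.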
func setup_db() *sql.DB {
db, err := sql.Open("mysql", "root@tcp(localhost:3306)/")
if err != nil {
panic(err.Error())
}
err = db.Ping()
if err != nil {
panic(err.Error())
}
_, err = db.Exec(`DROP DATABASE IF EXISTS HeartRating`)
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(`CREATE DATABASE IF NOT EXISTS HeartRating`)
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(`USE HeartRating;`)
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Users(
id INT NOT NULL AUTO_INCREMENT,
username varchar(255) UNIQUE,
last_update datetime,
PRIMARY KEY (id));`)
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Programs(
id INT NOT NULL AUTO_INCREMENT,
showname VARCHAR(255),
title VARCHAR(255),
PRIMARY KEY (id));`)
if err != nil {
log.Fatal(err)
}
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Sessions(
id INT NOT NULL AUTO_INCREMENT,
program_id INT,
user_id INT,
heart INT,
duration INT,
created_at datetime,
PRIMARY KEY (id));`)
if err != nil {
log.Fatal(err)
}
return db
}
func db_get_program(db *sql.DB, pid int) (string, string, error) {
show := ""
title := ""
query := `SELECT showname, title FROM HeartRating.Programs WHERE id=?;`
row, err := db.Query(query, pid)
if err != nil {
fmt.Println(err)
return show, title, err
}
defer row.Close()
if row.Next() {
err = row.Scan(&show, &title)
if err != nil {
fmt.Println(err)
return show, title, err
}
}
return show, title, err
}
func db_user_exist(db *sql.DB, user string) bool {
if len(user) == 0 {
return false
}
query := `SELECT id FROM HeartRating.Users WHERE username=?;`
row, err := db.Query(query, user)
if err != nil {
fmt.Println(err)
return false
}
defer row.Close()
if row.Next() {
return true
}
return false
}
func db_get_user_id(db *sql.DB, user string) (int, error) {
if len(user) == 0 {
return -1, nil
}
var id int
query := `SELECT id FROM HeartRating.Users WHERE username=?;`
row, err := db.Query(query, user)
if err != nil {
fmt.Println(err)
return -1, err
}
defer row.Close()
if row.Next() {
err = row.Scan(&id)
if err != nil {
fmt.Println(err)
return -1, err
}
}
fmt.Println("user id", user, id)
return id, nil
}
func db_get_user(db *sql.DB, id int) (string, error) {
var name string
query := `SELECT username FROM HeartRating.Users WHERE id=?;`
row, err := db.Query(query, id)
if err != nil {
fmt.Println(err)
return "", err
}
defer row.Close()
if row.Next() {
err = row.Scan(&name)
if err != nil {
fmt.Println(err)
return "", err
}
}
return name, nil
}
func db_get_users_all(db *sql.DB) ([]User, error) {
query := `SELECT id, username FROM HeartRating.Users ORDER BY last_update DESC;`
row, err := db.Query(query)
if err != nil {
fmt.Println(err)
return nil, err
}
defer row.Close()
users := make([]User, 0)
i := 0
for row.Next() {
var u string
var d int
err = row.Scan(&d, &u)
if err != nil {
fmt.Println(err)
return nil, err
}
users = append(users, User{d, u})
i += 1
}
fmt.Println("users", users)
return users, nil
}
func db_get_users(db *sql.DB) ([]string, error) {
query := `SELECT username FROM HeartRating.Users ORDER BY last_update DESC;`
row, err := db.Query(query)
if err != nil {
fmt.Println(err)
return nil, err
}
defer row.Close()
users := make([]string, 0)
i := 0
for row.Next() {
var u string
err = row.Scan(&u)
if err != nil {
fmt.Println(err)
return nil, err
}
users = append(users, u)
i += 1
}
fmt.Println("users", users)
return users, nil
}
func db_program_exist(db *sql.DB, show string, title string) bool {
query := `SELECT id FROM HeartRating.Programs WHERE showname=? and title=?;`
row, err := db.Query(query, show, title)
if err != nil {
fmt.Println(err)
return false
}
defer row.Close()
if row.Next() {
return true
}
return false
}
func db_get_program_id(db *sql.DB, show string, title string) (int, error) {
id := -1
query := `SELECT id FROM HeartRating.Programs WHERE showname=? and title=?;`
row, err := db.Query(query, show, title)
if err != nil {
fmt.Println(err)
return id, err
}
defer row.Close()
if row.Next() {
err = row.Scan(&id)
if err != nil {
fmt.Println(err)
return id, err
}
}
fmt.Println("program id", id)
return id, nil
}
func db_get_user_sessions(db *sql.DB, user string) ([]Session, error) {
id, err := db_get_user_id(db, user)
if id == -1 || err != nil {
return nil, err
}
query := `SELECT program_id, heart, duration FROM HeartRating.Sessions WHERE user_id=? ORDER BY created_at DESC;`
row, err := db.Query(query, id)
if err != nil {
fmt.Println(err)
return nil, err
}
defer row.Close()
sessions := make([]Session, 0)
for row.Next() {
var pid int
var heart int
var duration int
err = row.Scan(&pid, &heart, &duration)
if err != nil {
fmt.Println(err)
return nil, err
}
show, title, err := db_get_program(db, pid)
if err != nil {
fmt.Println(err)
return nil, err
}
s := Session{title, show, heart, duration}
sessions = append(sessions, s)
}
fmt.Println("sessions", sessions)
return sessions, nil
}
func db_get_program_sessions(db *sql.DB, show string, title string) ([]SessionP, error) {
id, err := db_get_program_id(db, show, title)
if id == -1 || err != nil {
return nil, err
}
query := `SELECT user_id, heart, duration FROM HeartRating.Sessions WHERE program_id=? ORDER BY created_at DESC;`
row, err := db.Query(query, id)
if err != nil {
fmt.Println(err)
return nil, err
}
defer row.Close()
sessions := make([]SessionP, 0)
for row.Next() {
var uid int
var heart int
var duration int
err = row.Scan(&uid, &heart, &duration)
if err != nil {
fmt.Println(err)
return nil, err
}
user, err := db_get_user(db, uid)
if err != nil {
fmt.Println(err)
return nil, err
}
s := SessionP{title, show, heart, duration, user}
sessions = append(sessions, s)
}
fmt.Println("sessionps", sessions)
return sessions, nil
}
func db_new_user(db *sql.DB, user string) error {
cmd := `INSERT INTO HeartRating.Users(username, last_update) values(?, NOW());`
_, err := db.Exec(cmd, user)
if err != nil {
return err
}
return nil
}
func db_new_program(db *sql.DB, show string, title string) error {
cmd := `INSERT INTO HeartRating.Programs(showname, title) values(?, ?);`
_, err := db.Exec(cmd, show, title)
if err != nil {
return err
}
return nil
}
func db_new_session(db *sql.DB, pid int, uid int, heart int, duration int) error {
cmd := `INSERT INTO HeartRating.Sessions(program_id, user_id, heart, duration, created_at)
values(?, ?, ?, ?, NOW());`
_, err := db.Exec(cmd, pid, uid, heart, duration)
if err != nil {
return err
}
cmd = `UPDATE HeartRating.Users SET last_update=NOW() WHERE id=?`
_, err = db.Exec(cmd, uid)
if err != nil {
log.Println(err)
return err
}
return nil
}
func db_new_data(db *sql.DB) error {
return nil
}
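// calc_rating combines watch time and heart score into one rating: the
// duration term (milliseconds) contributes up to roughly 60 points for a full
// 22-minute viewing, and each heart appears to add 5 points.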
func calc_rating(heart int, duration int) int {
dur_rating := (duration * 6) / (6000 * 22)
fmt.Println("DURATION RATING", dur_rating)
heart_rating := (20 * heart) / 4
fmt.Println("HEART RATING", heart_rating)
return dur_rating + heart_rating
}
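// launch_web wires up the martini routes: HTML pages for the index, per-user
// and per-program views, plus JSON endpoints under /api for saving sessions
// and listing users and their sessions.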
func launch_web(db *sql.DB) {
m := martini.Classic()
m.Use(render.Renderer())
m.Get("/", func(ren render.Render) {
pd := make([]userPageData, 0)
users, _ := db_get_users(db)
for _, u := range users {
s, _ := db_get_user_sessions(db, u)
for _, v := range s {
h := make([]int, 0)
for i := 0; i < v.Heart; i++ {
h = append(h, i)
}
rating := calc_rating(v.Heart, v.Duration)
pid, _ := db_get_program_id(db, v.Show, v.Title)
pd = append(pd, userPageData{u, v.Title, v.Show, h, v.Duration / 60000, rating, pid})
}
}
dat := indexPageData{pd}
ren.HTML(200, "index", dat)
})
m.Get("/user/:user", func(params martini.Params, ren render.Render) {
pd := make([]userPageData, 0)
user := params["user"]
ses, _ := db_get_user_sessions(db, user)
for _, v := range ses {
h := make([]int, 0)
for i := 0; i < v.Heart; i++ {
h = append(h, i)
}
rating := calc_rating(v.Heart, v.Duration)
pid, _ := db_get_program_id(db, v.Show, v.Title)
pd = append(pd, userPageData{user, v.Title, v.Show, h, v.Duration / 60000, rating, pid})
}
dat := indexPageData{pd}
ren.HTML(200, "user", dat)
})
m.Get("/program/:pid", func(params martini.Params, ren render.Render) {
pd := make([]userPageData, 0)
i, _ := strconv.Atoi(params["pid"])
show, title, _ := db_get_program(db, i)
ses, _ := db_get_program_sessions(db, show, title)
avgRating := 0
avgDuration := 0
avgHearts := 0
t := ""
s := ""
for _, v := range ses {
t = v.Title
s = v.Show
h := make([]int, 0)
for i := 0; i < v.Heart; i++ {
h = append(h, i)
}
avgHearts += v.Heart
avgDuration += v.Duration / 60000
rating := calc_rating(v.Heart, v.Duration)
avgRating += rating
pd = append(pd, userPageData{v.User, v.Title, v.Show, h, v.Duration / 60000, rating, i})
}
if len(ses) > 0 {
	avgRating /= len(ses)
	avgDuration /= len(ses)
	avgHearts /= len(ses)
}
avgh := make([]int, 0)
for i := 0; i < avgHearts; i++ {
avgh = append(avgh, i)
}
name := fmt.Sprintf("%s: %s", t, s)
dat := programPageData{name, avgRating, avgDuration, avgh, pd}
ren.HTML(200, "program", dat)
})
m.Post("/api/save", func(r *http.Request) string {
var dat PostData
err := json.NewDecoder(r.Body).Decode(&dat)
if err != nil {
log.Println(err)
return "failure"
}
if !db_user_exist(db, dat.User) {
db_new_user(db, dat.User)
}
uid, _ := db_get_user_id(db, dat.User)
if !db_program_exist(db, dat.Show, dat.Title) {
db_new_program(db, dat.Show, dat.Title)
}
pid, _ := db_get_program_id(db, dat.Show, dat.Title)
db_new_session(db, pid, uid, dat.Heart, dat.Duration)
return "success"
})
m.Get("/api/sessions/:user", func(params martini.Params) string {
user := params["user"]
ses, _ := db_get_user_sessions(db, user)
u, _ := db_get_user_id(db, user)
U := User{u, user}
fmt.Println("U", U)
fmt.Println("ses", ses)
resp := ApiSession{U, ses, "success"}
out, _ := json.MarshalIndent(resp, "", " ")
return string(out)
})
m.Get("/api/users", func() string {
users, _ := db_get_users_all(db)
resp := ApiUsers{users, "success"}
out, _ := json.MarshalIndent(resp, "", " ")
return string(out)
})
m.Run()
}
func test_data(db *sql.DB) {
db_new_user(db, "Alice")
db_new_user(db, "Bob")
db_new_user(db, "Carl")
db_new_user(db, "Dan")
db_new_user(db, "Evan")
db_new_user(db, "George")
db_new_user(db, "Harry")
db_new_user(db, "Iggy")
db_new_user(db, "Justine")
p := "Pilot"
db_new_program(db, p, "Daredevil")
db_new_program(db, p, "House")
db_new_program(db, p, "House of Cards")
db_new_program(db, p, "Survivor")
db_new_program(db, p, "Jeopardy")
db_new_program(db, p, "Shark Tank")
db_new_program(db, p, "Teen Moms")
db_new_program(db, p, "Cops")
pid, _ := db_get_program_id(db, p, "Daredevil")
ppid, _ := db_get_program_id(db, p, "House")
pppid, _ := db_get_program_id(db, p, "House of Cards")
ppppid, _ := db_get_program_id(db, p, "Survivor")
pppppid, _ := db_get_program_id(db, p, "Jeopardy")
ppppppid, _ := db_get_program_id(db, p, "Shark Tank")
pppppppid, _ := db_get_program_id(db, p, "Teen Moms")
ppppppppid, _ := db_get_program_id(db, p, "Cops")
uid, _ := db_get_user_id(db, "Alice")
uid2, _ := db_get_user_id(db, "Bob")
uid3, _ := db_get_user_id(db, "Carl")
uid4, _ := db_get_user_id(db, "Dan")
uid5, _ := db_get_user_id(db, "Evan")
uid6, _ := db_get_user_id(db, "George")
uid7, _ := db_get_user_id(db, "Harry")
uid8, _ := db_get_user_id(db, "Iggy")
// pid, uid, heart, duration
db_new_session(db, pid, uid, 8, 22*1000*60)
db_new_session(db, ppid, uid2, 7, 5*1000*60)
db_new_session(db, pppid, uid3, 5, 9*1000*60)
db_new_session(db, ppppid, uid2, 1, 5*1000*60)
db_new_session(db, pppppid, uid3, -1, 8*1000*60)
db_new_session(db, ppppppid, uid4, 4, 10*1000*60)
db_new_session(db, pppppppid, uid5, 2, 18*1000*60)
db_new_session(db, pppppppid, uid7, 3, 11*1000*60)
db_new_session(db, ppppppppid, uid8, 2, 8*1000*60)
db_new_session(db, ppppppppid, uid6, 5, 12*1000*60)
}
func main() {
db := setup_db()
defer db.Close()
test_data(db)
launch_web(db)
}
| [] | [] | [] | [] | [] | go | null | null | null |
my_pose_track_v3.py | #!/usr/bin/python
# -*- coding:utf8 -*-
"""
Author: Haoming Chen
E-mail: [email protected]
Time: 2020/01/13
Description: Use ground-truth information from future frames to correct tracking results backwards in time.
"""
import time
import argparse
# import vision essentials
import cv2
import numpy as np
import tensorflow as tf
import logging
# import Network
from network_MSRA152 import Network
# detector utils
from detector.detector_yolov3 import * ##
# pose estimation utils
from HPE.dataset import Preprocessing
from HPE.config import cfg
from tfflat.base import Tester
from tfflat.utils import mem_info
from tfflat.logger import colorlogger
# from nms.gpu_nms import gpu_nms
# from nms.cpu_nms import cpu_nms
# import GCN utils
from graph import visualize_pose_matching
from graph.visualize_pose_matching import *
# import my own utils
import sys, os, time
import math
sys.path.append(os.path.abspath("./graph"))
sys.path.append(os.path.abspath("./utils"))
from utils_json import *
from utils_io_file import *
from utils_io_folder import *
from visualizer import *
from visualizer import visualizer
from utils_choose import *
import logging
from sheen import Str, ColoredHandler
from my_toolbox.json_utils import *
from my_toolbox.bipartite_graph import *
from tqdm import tqdm
flag_visualize = True
flag_nms = False # Default is False, unless you know what you are doing
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
################
## For debugging only
image_crop_output_path = '/media/D/light-track/data/demo/crop'
image_seed_crop_output_path = '/media/D/light-track/data/demo/seed_crop'
tracking_gt_info = []
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(ColoredHandler())
################
def initialize_parameters():
# global video_name, img_id
global nms_method, nms_thresh, min_scores, min_box_size
nms_method = 'nms'
nms_thresh = 1.
min_scores = 1e-10
min_box_size = 0.
global keyframe_interval, enlarge_scale, pose_matching_threshold
keyframe_interval = 40 # choice examples: [2, 3, 5, 8, 10, 20, 40, 100, ....]
enlarge_scale = 0.2 # how much to enlarge the bbox before pose estimation
pose_matching_threshold = 0.5
global flag_flip
flag_flip = True
global total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE
global total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS
total_time_POSE_ESTIMATOR = 0
total_time_POSE_SIMILARITY = 0
total_time_DET = 0
total_time_ALL = 0
total_time_ASSOCIATE = 0
total_num_VIDEOS = 0
total_num_FRAMES = 0
total_num_PERSONS = 0
"""test"""
global filter_bbox_number, iou_alpha1, pose_alpha1
filter_bbox_number = 0
iou_alpha1 = 1.5
pose_alpha1 = -0.95  # pose difference score; the smaller the value, the more similar the two poses.
return
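# light_track runs detection-, pose- and IOU-based tracking over one video:
# it reads per-frame ground-truth info, proposes boxes from YOLOv3 and from the
# previous frame's keypoints, associates them via bipartite matching, and on
# labeled (gt) frames corrects the track ids of the preceding frames.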
def light_track(pose_estimator,
image_folder, output_json_path,
visualize_folder, output_video_path, gt_info):
global total_time_POSE_ESTIMATOR, total_time_POSE_SIMILARITY, total_time_DET, total_time_ALL, total_time_ASSOCIATE
global video_name, iou_alpha1, pose_alpha1
global filter_bbox_number, total_num_FRAMES, total_num_PERSONS, total_num_VIDEOS
''' 1. statistics: get total time for lighttrack processing'''
st_time_total = time.time()
### hyper-parameters
keypoints_number = 15
interval = 5
bbox_dets_list_list = []
keypoints_list_list = []
num_imgs = len(gt_info)
first_img_id = 0
start_from_labeled = False
if start_from_labeled:
first_img_id = find_first_labeled_opensvai_json(gt_info)
next_id = 0  # track_id starts from 0
img_id = first_img_id
total_num_FRAMES += num_imgs
gt_frame_index_list = find_gt_frame_index_list(gt_info, interval=interval)
while img_id < num_imgs:
## loop Initialization
img_gt_info = gt_info[img_id]
image_name, labeled, candidates_info = read_image_data_opensvai_json(img_gt_info)
img_path = os.path.join(image_folder, image_name)
bbox_dets_list = [] # keyframe: start from empty
keypoints_list = [] # keyframe: start from empty
prev_frame_img_id = max(0, img_id - first_img_id - 1)
# If the first frame is a gt frame, copy the gt results directly into the list_list.
if start_from_labeled and img_id == first_img_id:
num_dets = len(candidates_info)
for det_id in range(num_dets):
track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)
# The first frame's gt annotations are used directly.
bbox_det_dict = {"img_id": img_id,
"det_id": det_id,
"imgpath": img_path,
"track_id": track_id,
"bbox": bbox_det}
keypoints_dict = {"img_id": img_id,
"det_id": det_id,
"imgpath": img_path,
"track_id": track_id,
"keypoints": keypoints}
bbox_dets_list.append(bbox_det_dict)
keypoints_list.append(keypoints_dict)
next_id = max(next_id, track_id)
next_id += 1
bbox_dets_list_list.append(bbox_dets_list)
keypoints_list_list.append(keypoints_list)
else:
#### Continuous tracking; when img_id is a gt frame, the gt and the predictions are compared.
logger.info("Tracing,img_id:{}".format(img_id))
candidates_total = []
st_time_DET = time.time()
candidates_from_detector = inference_yolov3(img_path)
end_time_DET = time.time()
total_time_DET += (end_time_DET - st_time_DET)
candidates_from_prev = []
bbox_list_prev_frame = []
''' Supplement candidate boxes using information from the previous frame '''
if img_id > first_img_id:
bbox_list_prev_frame = bbox_dets_list_list[prev_frame_img_id].copy()
keypoints_list_prev_frame = keypoints_list_list[prev_frame_img_id].copy()
num_prev_bbox = len(bbox_list_prev_frame)
for prev_det_id in range(num_prev_bbox):
# obtain bbox position and track id
keypoints = keypoints_list_prev_frame[prev_det_id]['keypoints']
bbox_det_next = get_bbox_from_keypoints(keypoints)
if bbox_invalid(bbox_det_next):
continue
# xywh
candidates_from_prev.append(bbox_det_next)
''' Collect all candidate boxes for this frame '''
candidates_total = candidates_from_detector + candidates_from_prev
num_candidate = len(candidates_total)
''' Use the keypoint confidences as the bbox confidence '''
candidates_dets = []
for candidate_id in range(num_candidate):
bbox_det = candidates_total[candidate_id]
bbox_det_dict = {"img_id": img_id,
"det_id": candidate_id,
"imgpath": img_path,
"track_id": None,
"bbox": bbox_det}
st_time_pose = time.time()
keypoints = inference_keypoints(pose_estimator, bbox_det_dict)[0]['keypoints']
end_time_pose = time.time()
total_time_POSE_ESTIMATOR += (end_time_pose - st_time_pose)
bbox_det_next = xywh_to_x1y1x2y2(bbox_det)
score = sum(keypoints[2::3]) / keypoints_number
# For unknown reasons the pose confidence can exceed 1 here.
if bbox_invalid(bbox_det_next) or score < 0.7:
filter_bbox_number += 1
continue
candidate_det = bbox_det_next + [score]
candidates_dets.append(candidate_det)
keypoints_dict = {"img_id": img_id,
"det_id": candidate_id,
"imgpath": img_path,
"track_id": None,
"keypoints": keypoints}
bbox_dets_list.append(bbox_det_dict)
keypoints_list.append(keypoints_dict)
# Apply NMS based on the bbox confidences
keep = py_cpu_nms(np.array(candidates_dets, dtype=np.float32), 0.5) if len(candidates_dets) > 0 else []
candidates_total = np.array(candidates_total)[keep]
t = bbox_dets_list.copy()
k = keypoints_list.copy()
# Keep only the detections that survived NMS
bbox_dets_list = [t[i] for i in keep]
keypoints_list = [k[i] for i in keep]
""" Data association """
cur_det_number = len(candidates_total)
prev_det_number = len(bbox_list_prev_frame)
if img_id == first_img_id or prev_det_number == 0:
for det_id, bbox_det_dict in enumerate(bbox_dets_list):
keypoints_dict = keypoints_list[det_id]
bbox_det_dict['det_id'] = det_id
keypoints_dict['det_id'] = det_id
track_id = next_id
bbox_det_dict['track_id'] = track_id
keypoints_dict['track_id'] = track_id
next_id = max(next_id, track_id)
next_id += 1
else:
scores = np.zeros((cur_det_number, prev_det_number))
for det_id in range(cur_det_number):
bbox_det_dict = bbox_dets_list[det_id]
keypoints_dict = keypoints_list[det_id]
bbox_det = bbox_det_dict['bbox']
keypoints = keypoints_dict['keypoints']
# Compute matching scores between the current bbox and the previous frame's bboxes
for prev_det_id in range(prev_det_number):
prev_bbox_det_dict = bbox_list_prev_frame[prev_det_id]
prev_keypoints_dict = keypoints_list_prev_frame[prev_det_id]
iou_score = iou(bbox_det, prev_bbox_det_dict['bbox'], xyxy=False)
if iou_score > 0.5:
scores[det_id, prev_det_id] = iou_alpha1 * iou_score
st_time_ass = time.time()
bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,
bbox_list_prev_frame, scores,
keypoints_list, next_id)
end_time_ass = time.time()
total_time_ASSOCIATE += (end_time_ass - st_time_ass)
next_id = now_next_id
if len(bbox_dets_list) == 0:
bbox_det_dict = {"img_id": img_id,
"det_id": 0,
"track_id": None,
"imgpath": img_path,
"bbox": [0, 0, 2, 2]}
bbox_dets_list.append(bbox_det_dict)
keypoints_dict = {"img_id": img_id,
"det_id": 0,
"track_id": None,
"imgpath": img_path,
"keypoints": []}
keypoints_list.append(keypoints_dict)
bbox_dets_list_list.append(bbox_dets_list)
keypoints_list_list.append(keypoints_list)
##########################################
#### If this is a gt frame, compare it with the predicted results ####
##########################################
if img_id in gt_frame_index_list and gt_frame_index_list.index(img_id) >= 1:
logger.info("type:{},img_id:{}".format('gt_guide', img_id))
# gt frame
num_dets = len(candidates_info)
bbox_list_prediction = bbox_dets_list_list[img_id - first_img_id].copy()
keypoints_list_prediction = keypoints_list_list[img_id - first_img_id].copy()
bbox_list_gt = []
keypoints_list_gt = []
for det_id in range(num_dets):
track_id, bbox_det, keypoints = get_candidate_info_opensvai_json(candidates_info, det_id)
bbox_det_dict = {"img_id": img_id,
"det_id": det_id,
"imgpath": img_path,
"track_id": track_id,
"bbox": bbox_det}
keypoints_dict = {"img_id": img_id,
"det_id": det_id,
"imgpath": img_path,
"track_id": track_id,
"keypoints": keypoints}
bbox_list_gt.append(bbox_det_dict)
keypoints_list_gt.append(keypoints_dict)
bbox_dets_list_list[img_id - first_img_id] = bbox_list_gt
keypoints_list_list[img_id - first_img_id] = keypoints_list_gt
need_correct = distance_between_gt_prediction(
gt_dict={"det": bbox_list_gt, "keypoints": keypoints_list_gt},
predict_dict={"det": bbox_list_prediction,
"keypoints": keypoints_list_prediction})
if need_correct:
## Correct backwards in time
correct_index = img_id - 1
correct_end_index = img_id - int(interval / 2)
# Iterate from the latest frame back towards earlier frames
while correct_index >= correct_end_index:
## Assume the boxes are correct but the track ids are wrong
## At this point prev_det_number refers to the gt detections
bbox_dets_list = bbox_dets_list_list[correct_index - first_img_id]
keypoints_list = keypoints_list_list[correct_index - first_img_id]
prev_det_number = len(bbox_list_gt)
cur_det_number = len(bbox_dets_list)
# prev holds already-matched detections, cur holds the ones still to be matched
scores = np.zeros((cur_det_number, prev_det_number))
for det_id in range(cur_det_number):
bbox_det_dict = bbox_dets_list[det_id]
keypoints_dict = keypoints_list[det_id]
bbox_det = bbox_det_dict['bbox']
keypoints = keypoints_dict['keypoints']
# Compute matching scores between this frame's bboxes and the gt bboxes
for prev_det_id in range(prev_det_number):
bbox_det_dict_gt = bbox_list_gt[prev_det_id]
iou_score = iou(bbox_det, bbox_det_dict_gt['bbox'], xyxy=False)
if iou_score > 0.2:
scores[det_id, prev_det_id] = iou_alpha1 * iou_score
if prev_det_number > 0 and cur_det_number > 0:
bbox_dets_list, keypoints_list, now_next_id = bipartite_graph_matching(bbox_dets_list,
bbox_list_gt,
scores,
keypoints_list,
next_id)
# No bbox survived for this frame
if len(bbox_dets_list) == 0:
bbox_det_dict = {"img_id": img_id,
"det_id": 0,
"track_id": None,
"imgpath": img_path,
"bbox": [0, 0, 2, 2]}
bbox_dets_list.append(bbox_det_dict)
keypoints_dict = {"img_id": img_id,
"det_id": 0,
"track_id": None,
"imgpath": img_path,
"keypoints": []}
keypoints_list.append(keypoints_dict)
bbox_dets_list_list[correct_index - first_img_id] = bbox_dets_list.copy()
keypoints_list_list[correct_index - first_img_id] = keypoints_list.copy()
correct_index -= 1
img_id += 1
''' 1. statistics: get total time for lighttrack processing'''
end_time_total = time.time()
total_time_ALL += (end_time_total - st_time_total)
# convert results into openSVAI format
print("Exporting Results in openSVAI Standard Json Format...")
poses_standard = pose_to_standard_mot(keypoints_list_list, bbox_dets_list_list)
# json_str = python_to_json(poses_standard)
# print(json_str)
# output json file
pose_json_folder, _ = get_parent_folder_from_path(output_json_path)
create_folder(pose_json_folder)
write_json_to_file(poses_standard, output_json_path)
print("Json Export Finished!")
# visualization
if flag_visualize is True:
print("Visualizing Pose Tracking Results...")
create_folder(visualize_folder)
visualizer.show_all_from_standard_json(output_json_path, classes, joint_pairs, joint_names,
image_folder,
visualize_folder,
flag_track=True)
print("Visualization Finished!")
img_paths = get_immediate_childfile_paths(visualize_folder)
avg_fps = total_num_FRAMES / total_time_ALL
# make_video_from_images(img_paths, output_video_path, fps=avg_fps, size=None, is_color=True, format="XVID")
fps = 5  # originally 25
visualizer.make_video_from_images(img_paths, output_video_path, fps=fps, size=None, is_color=True,
format="XVID")
def distance_between_gt_prediction(gt_dict, predict_dict):
"""
Decide whether the prediction needs to be corrected against the gt.
:param gt_dict:
:param predict_dict:
:return:
"""
gt_det_list = gt_dict['det']
gt_keypoints_list = gt_dict['keypoints']
predict_det_list = predict_dict['det']
predict_keypoints_list = predict_dict['keypoints']
# TODO
# for gt_det_id in gt_det_list:
# gt_det = gt_det_list[gt_det_id]
# gt_track_id = gt_det['track_id']
# for predict_det_id in predict_det_list:
# predict_det = predict_det_list[predict_det_id]
# predict_track_id = predict_det['track_id']
# if predict_track_id == gt_track_id:
return True
def find_gt_frame_index_list(gt_info, interval=5):
gt_index_list = []
prev_gt_index = -1
for index in range(len(gt_info)):
if gt_info[index]['labeled'] is True and (len(gt_index_list) == 0 or (index - prev_gt_index) % interval == 0):
prev_gt_index = index
gt_index_list.append(index)
return gt_index_list
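# Match current detections to previous-frame detections with Kuhn-Munkres
# (Hungarian) assignment on the score matrix; unmatched current detections
# receive fresh track ids starting from next_id.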
def bipartite_graph_matching(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames,
current_keypoints_dict_list, next_id):
prev_to_cur_match = Kuhn_Munkras_match(current_bbox_dict_list, prev_bbox_dict_list, score_between_two_frames)
result_bbox_dict_list = []
result_keypoints_dict_list = []
det_number = 0
assigned_cur_bbox = []
for prev_index, cur_index in enumerate(prev_to_cur_match):
if not np.isnan(cur_index):
assigned_cur_bbox.append(cur_index)
cur_index = int(cur_index)
cur_bbox_dict = current_bbox_dict_list[cur_index]
cur_keypoints_dict = current_keypoints_dict_list[cur_index]
cur_bbox_dict['det_id'] = det_number
cur_bbox_dict['track_id'] = prev_index
cur_keypoints_dict['det_id'] = det_number
cur_keypoints_dict['track_id'] = prev_index
result_bbox_dict_list.append(cur_bbox_dict)
result_keypoints_dict_list.append(cur_keypoints_dict)
det_number += 1
# Assign a new track_id to any bbox that has not been matched
for cur_index in range(len(current_bbox_dict_list)):
if cur_index not in assigned_cur_bbox:
cur_bbox_dict = current_bbox_dict_list[cur_index]
cur_keypoints_dict = current_keypoints_dict_list[cur_index]
cur_bbox_dict['det_id'] = det_number
cur_bbox_dict['track_id'] = next_id
cur_keypoints_dict['det_id'] = det_number
cur_keypoints_dict['track_id'] = next_id
result_bbox_dict_list.append(cur_bbox_dict)
result_keypoints_dict_list.append(cur_keypoints_dict)
det_number += 1
next_id += 1
return result_bbox_dict_list, result_keypoints_dict_list, next_id
def distance_between_two_boxs(boxA, boxB):
x1, y1, _, _ = boxA
x2, y2, _, _ = boxB
distance = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))
return distance
def get_track_id_SGCN(bbox_cur_frame, bbox_list_prev_frame, keypoints_cur_frame,
keypoints_list_prev_frame):
assert (len(bbox_list_prev_frame) == len(keypoints_list_prev_frame))
min_index = None
min_matching_score = sys.maxsize
global pose_matching_threshold
# if track_id is still not assigned, the person is really missing or track is really lost
track_id = -1
for det_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
bbox_prev_frame = bbox_det_dict["bbox"]
# check the pose matching score
keypoints_dict = keypoints_list_prev_frame[det_index]
keypoints_prev_frame = keypoints_dict["keypoints"]
pose_matching_score = get_pose_matching_score(keypoints_cur_frame, keypoints_prev_frame,
bbox_cur_frame,
bbox_prev_frame)
if pose_matching_score <= pose_matching_threshold and pose_matching_score <= min_matching_score:
# match the target based on the pose matching score
min_matching_score = pose_matching_score
min_index = det_index
if min_index is None:
return -1, None
else:
track_id = bbox_list_prev_frame[min_index]["track_id"]
return track_id, min_index
def get_track_id_SpatialConsistency(bbox_cur_frame, bbox_list_prev_frame):
""" 用当前帧的bbox,去找之前帧中的bboxes的IOU值最大bbox。
使用一个bbox去前一帧找IOU值最大的。
"""
thresh = 0.3
max_iou_score = 0
max_index = -1
for bbox_index, bbox_det_dict in enumerate(bbox_list_prev_frame):
bbox_prev_frame = bbox_det_dict["bbox"]
boxA = xywh_to_x1y1x2y2(bbox_cur_frame)
boxB = xywh_to_x1y1x2y2(bbox_prev_frame)
iou_score = iou(boxA, boxB)
if iou_score > max_iou_score:
max_iou_score = iou_score
max_index = bbox_index
if max_iou_score > thresh:
track_id = bbox_list_prev_frame[max_index]["track_id"]
return track_id, max_index
else:
return -1, None
def get_pose_matching_score(keypoints_A, keypoints_B, bbox_A, bbox_B):
if keypoints_A == [] or keypoints_B == []:
print("graph not correctly generated!")
return sys.maxsize
if bbox_invalid(bbox_A) or bbox_invalid(bbox_B):
print("graph not correctly generated!")
return sys.maxsize
graph_A, flag_pass_check = keypoints_to_graph(keypoints_A, bbox_A)
if flag_pass_check is False:
print("graph not correctly generated!")
return sys.maxsize
graph_B, flag_pass_check = keypoints_to_graph(keypoints_B, bbox_B)
if flag_pass_check is False:
print("graph not correctly generated!")
return sys.maxsize
sample_graph_pair = (graph_A, graph_B)
data_A, data_B = graph_pair_to_data(sample_graph_pair)
start = time.time()
flag_match, dist = pose_matching(data_A, data_B)
end = time.time()
return dist
def is_target_lost(keypoints, method="max_average"):
num_keypoints = int(len(keypoints) / 3.0)
if method == "average":
# pure average
score = 0
for i in range(num_keypoints):
score += keypoints[3 * i + 2]
score /= num_keypoints * 1.0
print("target_score: {}".format(score))
elif method == "max_average":
score_list = keypoints[2::3]
score_list_sorted = sorted(score_list)
top_N = 4
assert (top_N < num_keypoints)
top_scores = [score_list_sorted[-i] for i in range(1, top_N + 1)]
score = sum(top_scores) / top_N
if score < 0.6:
return True
else:
return False
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]  # bbox scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# Sort scores in descending order and take the indices
order = scores.argsort()[::-1]
# keep holds the boxes that are finally retained
keep = []
while order.size > 0:
# order[0] is the box with the highest remaining score; always keep it
i = order[0]
keep.append(i)
# Compute the overlap area between box i and all other boxes
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
# intersection / union gives the IOU
ovr = inter / (areas[i] + areas[order[1:]] - inter)
# inds are the indices of boxes whose IOU with box i is below the threshold; the rest are suppressed by box i
inds = np.where(ovr <= thresh)[0]
# Keep only boxes whose overlap with box i is below the threshold; ovr is one element shorter than order (it excludes i), so inds + 1 maps back to the surviving boxes
order = order[inds + 1]
return keep
def iou(boxA, boxB, xyxy=True):
# box: (x1, y1, x2, y2)
# determine the (x, y)-coordinates of the intersection rectangle
if not xyxy:
# If the boxes are in x,y,w,h format convert them; x,y is the top-left (minimum) corner
b1_x1, b1_x2 = boxA[0], boxA[0] + boxA[2]
b1_y1, b1_y2 = boxA[1], boxA[1] + boxA[3]
b2_x1, b2_x2 = boxB[0], boxB[0] + boxB[2]
b2_y1, b2_y2 = boxB[1], boxB[1] + boxB[3]
xA = max(b1_x1, b2_x1)
yA = max(b1_y1, b2_y1)
xB = min(b1_x2, b2_x2)
yB = min(b1_y2, b2_y2)
else:
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
if not xyxy:
boxAArea = (boxA[2] + 1) * (boxA[3] + 1)
boxBArea = (boxB[2] + 1) * (boxB[3] + 1)
else:
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1) # w×h
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1) # w×h
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
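# Derive a bbox (x, y, w, h) from the visible keypoints, enlarged by
# `enlarge_scale`; returns the placeholder box [0, 0, 2, 2] when no usable
# keypoints are available.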
def get_bbox_from_keypoints(keypoints_python_data):
if keypoints_python_data == [] or keypoints_python_data == 45 * [0]:
return [0, 0, 2, 2]
num_keypoints = len(keypoints_python_data)
x_list = []
y_list = []
for keypoint_id in range(int(num_keypoints / 3)):
x = keypoints_python_data[3 * keypoint_id]
y = keypoints_python_data[3 * keypoint_id + 1]
vis = keypoints_python_data[3 * keypoint_id + 2]  # visibility flag
if vis != 0 and vis != 3:
x_list.append(x)
y_list.append(y)
if not x_list or not y_list:
    return [0, 0, 2, 2]
min_x = min(x_list)
min_y = min(y_list)
max_x = max(x_list)
max_y = max(y_list)
scale = enlarge_scale # enlarge bbox by 20% with same center position
bbox = enlarge_bbox([min_x, min_y, max_x, max_y], scale)
bbox_in_xywh = x1y1x2y2_to_xywh(bbox)
return bbox_in_xywh
def enlarge_bbox(bbox, scale):
assert (scale > 0)
min_x, min_y, max_x, max_y = bbox
margin_x = int(0.5 * scale * (max_x - min_x))
margin_y = int(0.5 * scale * (max_y - min_y))
if margin_x < 0: margin_x = 2
if margin_y < 0: margin_y = 2
min_x -= margin_x
max_x += margin_x
min_y -= margin_y
max_y += margin_y
width = max_x - min_x
height = max_y - min_y
if max_y < 0 or max_x < 0 or width <= 0 or height <= 0 or width > 2000 or height > 2000:
min_x = 0
max_x = 2
min_y = 0
max_y = 2
bbox_enlarged = [min_x, min_y, max_x, max_y]
return bbox_enlarged
def inference_keypoints(pose_estimator, test_data):
cls_dets = test_data["bbox"]
# nms on the bboxes
if flag_nms is True:
cls_dets, keep = apply_nms(cls_dets, nms_method, nms_thresh)
test_data = np.asarray(test_data)[keep]
if len(keep) == 0:
return -1
else:
test_data = [test_data]
# crop and detect pose
pose_heatmaps, details, cls_skeleton, crops, start_id, end_id = get_pose_from_bbox(pose_estimator,
test_data,
cfg)
# get keypoint positions from pose
keypoints = get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id)
# dump results
results = prepare_results(test_data[0], keypoints, cls_dets)
return results
def apply_nms(cls_dets, nms_method, nms_thresh):
# nms and filter
keep = np.where((cls_dets[:, 4] >= min_scores) &
((cls_dets[:, 3] - cls_dets[:, 1]) * (
cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0]
cls_dets = cls_dets[keep]
if len(cls_dets) > 0:
if nms_method == 'nms':
keep = gpu_nms(cls_dets, nms_thresh)
elif nms_method == 'soft':
keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2)
else:
assert False
cls_dets = cls_dets[keep]
return cls_dets, keep
def get_pose_from_bbox(pose_estimator, test_data, cfg):
cls_skeleton = np.zeros(
(len(test_data), cfg.nr_skeleton, 3)) # cfg.nr_skeleton=joint number. size=number*3
crops = np.zeros((len(test_data), 4))
batch_size = 1
start_id = 0
end_id = min(len(test_data), batch_size)
test_imgs = []
details = []
for i in range(start_id, end_id):
test_img, detail = Preprocessing(test_data[i], stage='test')
test_imgs.append(test_img)
details.append(detail)
details = np.asarray(details)
feed = test_imgs
for i in range(end_id - start_id):
ori_img = test_imgs[i][0].transpose(1, 2, 0)
if flag_flip == True:
flip_img = cv2.flip(ori_img, 1)
feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...])
feed = np.vstack(feed)
res = pose_estimator.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])[0]
res = res.transpose(0, 3, 1, 2)
if flag_flip == True:
for i in range(end_id - start_id):
fmp = res[end_id - start_id + i].transpose((1, 2, 0))
fmp = cv2.flip(fmp, 1)
fmp = list(fmp.transpose((2, 0, 1)))
for (q, w) in cfg.symmetry:
fmp[q], fmp[w] = fmp[w], fmp[q]
fmp = np.array(fmp)
res[i] += fmp
res[i] /= 2
pose_heatmaps = res
return pose_heatmaps, details, cls_skeleton, crops, start_id, end_id
def get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id):
res = pose_heatmaps
for test_image_id in range(start_id, end_id):
r0 = res[test_image_id - start_id].copy()
r0 /= 255.
r0 += 0.5
for w in range(cfg.nr_skeleton):
res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w])
border = 10
dr = np.zeros(
(cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))
dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy()
for w in range(cfg.nr_skeleton):
dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)
for w in range(cfg.nr_skeleton):
lb = dr[w].argmax()
y, x = np.unravel_index(lb, dr[w].shape)
dr[w, y, x] = 0
lb = dr[w].argmax()
py, px = np.unravel_index(lb, dr[w].shape)
y -= border
x -= border
py -= border + y
px -= border + x
ln = (px ** 2 + py ** 2) ** 0.5
delta = 0.25
if ln > 1e-3:
x += delta * px / ln
y += delta * py / ln
x = max(0, min(x, cfg.output_shape[1] - 1))
y = max(0, min(y, cfg.output_shape[0] - 1))
cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2)
cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]
# map back to original images
crops[test_image_id, :] = details[test_image_id - start_id, :]
for w in range(cfg.nr_skeleton):
cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[
1] * (crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0]
cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[
0] * (crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1]
return cls_skeleton
def prepare_results(test_data, cls_skeleton, cls_dets):
cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton)
cls_scores = 1
dump_results = []
cls_skeleton = np.concatenate(
[cls_skeleton.reshape(-1, cfg.nr_skeleton * 3),
(cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]],
axis=1)
for i in range(len(cls_skeleton)):
result = dict(image_id=test_data['img_id'],
category_id=1,
score=float(round(cls_skeleton[i][-1], 4)),
keypoints=cls_skeleton[i][:-1].round(3).tolist())
dump_results.append(result)
return dump_results
def pose_to_standard_mot(keypoints_list_list, dets_list_list):
openSVAI_python_data_list = []
num_keypoints_list = len(keypoints_list_list)
num_dets_list = len(dets_list_list)
assert (num_keypoints_list == num_dets_list)
for i in range(num_dets_list):
dets_list = dets_list_list[i]
keypoints_list = keypoints_list_list[i]
if dets_list == []:
continue
img_path = dets_list[0]["imgpath"]
img_folder_path = os.path.dirname(img_path)
img_name = os.path.basename(img_path)
img_info = {"folder": img_folder_path,
"name": img_name,
"id": [int(i)]}
openSVAI_python_data = {"image": [], "candidates": []}
openSVAI_python_data["image"] = img_info
num_dets = len(dets_list)
num_keypoints = len(
keypoints_list) # number of persons, not number of keypoints for each person
candidate_list = []
for j in range(num_dets):
keypoints_dict = keypoints_list[j]
dets_dict = dets_list[j]
img_id = keypoints_dict["img_id"]
det_id = keypoints_dict["det_id"]
track_id = keypoints_dict["track_id"]
img_path = keypoints_dict["imgpath"]
bbox_dets_data = dets_list[det_id]
det = dets_dict["bbox"]
if det == [0, 0, 2, 2]:
# do not provide keypoints
candidate = {"det_bbox": [0, 0, 2, 2],
"det_score": 0}
else:
bbox_in_xywh = det[0:4]
keypoints = keypoints_dict["keypoints"]
track_score = sum(keypoints[2::3]) / len(keypoints) / 3.0
candidate = {"det_bbox": bbox_in_xywh,
"det_score": 1,
"track_id": track_id,
"track_score": track_score,
"pose_keypoints_2d": keypoints}
candidate_list.append(candidate)
openSVAI_python_data["candidates"] = candidate_list
openSVAI_python_data_list.append(openSVAI_python_data)
return openSVAI_python_data_list
def x1y1x2y2_to_xywh(det):
x1, y1, x2, y2 = det
w, h = int(x2) - int(x1), int(y2) - int(y1)
return [x1, y1, w, h]
def xywh_to_x1y1x2y2(det):
x1, y1, w, h = det
x2, y2 = x1 + w, y1 + h
return [x1, y1, x2, y2]
def bbox_invalid(bbox):
if bbox == [0, 0, 2, 2]:
return True
if bbox[2] <= 0 or bbox[3] <= 0 or bbox[2] > 2000 or bbox[3] > 2000:
return True
return False
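# Note: this YOLO-style NMS variant operates on torch tensors and relies on
# `torch`, `xywh2xyxy` and `bbox_iou` being available (presumably via the
# detector utilities imported above); it appears unused by the numpy-based
# tracking path in this file.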
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# prediction [image_number,]
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = xywh2xyxy(prediction[..., :4])  # first four values
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]  # 1 is the dimension argument
# Sort by it
image_pred = image_pred[(-score).argsort()]  # sort image_pred by score, highest-scoring predictions first
class_preds = image_pred[:, 5:].max(1, keepdim=True)[1].float() # keepdim=True shape : [...,1]
detections = torch.cat((image_pred[:, :5], class_preds), 1)  # concatenate along columns, appending class_preds after the first 5 values
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
# Compare all candidates against the highest-confidence detection (including itself)
large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
if __name__ == '__main__':
global args
parser = argparse.ArgumentParser()
# parser.add_argument('--video_path', '-v', type=str, dest='video_path',
# # default="data/demo/video.mp4")
# default="data/demo/0003.m4")
# parser.add_argument('--images_path', '-i', type=str, dest='images_path',
# default="data/demo/mpii-video-pose/0001")
# parser.add_argument('--model', '-m', type=str, dest='test_model',
# default="weights/mobile-deconv/snapshot_296.ckpt")
# parser.add_argument('--model', '-m', type=str, dest='test_model',
# default="weights/CPN101/CPN_snapshot_293.ckpt")
parser.add_argument('--model', '-m', type=str, dest='test_model',
default="weights/MSRA152/MSRA_snapshot_285.ckpt")
# default="weights/mobile-deconv/snapshot_296.ckpt")
parser.add_argument('--train', type=bool, dest='train',
default=True)
# parser.add_argument('--exp_number', type=str, dest='exp_number', default='2017-val',
# help='number of experiment')
parser.add_argument('--exp_number', type=str, dest='exp_number', default='test_one_video',
help='number of experiment')
args = parser.parse_args()
args.bbox_thresh = 0.4
# initialize pose estimator
initialize_parameters()
pose_estimator = Tester(Network(), cfg)
pose_estimator.load_weights(args.test_model)
train = args.train
exp_number = args.exp_number
##################################
test_one_video = True
exp_number = "test_one_video_MSRA152_guide"
val = False
# exp_number = "2017-val-iou-{}-pose{}-together-MSRA152".format(iou_alpha1, pose_alpha1)
# exp_number = "2017-val-iou-{}-together-MSRA152-guide".format(iou_alpha1)
test = False
# exp_number = "2017-test-iou-pose-together"
experiment_output_root = '/media/F'
visualize_root_folder = "{}/exp_{}/visualize".format(experiment_output_root, exp_number)
output_video_folder = "{}/exp_{}/videos".format(experiment_output_root, exp_number)
output_json_folder = "{}/exp_{}/jsons".format(experiment_output_root, exp_number)
evaluation_folder = "{}/exp_{}/evaluation".format(experiment_output_root, exp_number)
logger_file_foler = "{}/exp_{}/log".format(experiment_output_root, exp_number)
create_folder(output_video_folder)
create_folder(output_json_folder)
create_folder(logger_file_foler)
create_folder(evaluation_folder)
create_folder(os.path.join(evaluation_folder, "annotations", "val"))
create_folder(os.path.join(evaluation_folder, "out"))
create_folder(os.path.join(evaluation_folder, "posetrack_results"))
## save log file
logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=logging.DEBUG,
filename=os.path.join(logger_file_foler, 'experiment.log'),
filemode='a')
####################################
logger.info(" test_one_video:{} val:{} test:{} ".format(test_one_video, val, test))
""" 每个JSON文件为一个视频,读取一个个的JSON文件,产生结果 """
if test_one_video:
numbers = ['24642', '24635', '23699', '23695', '23484', '23471', '23416', '22682', '22671', '22642', '22124',
'00043', '00096']
posetrack_dataset_path = "/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data"
numbers = ['23699']
input_jsons = ["/media/D/DataSet/PoseTrack2017/train/{}_mpii_relpath_5sec_trainsub_OpenSVAI.json".format(number)
for
number in numbers]
frame_number = 0
videos_number = len(input_jsons)
for input_json in tqdm(input_jsons):
videos_json_data, videos_number = read_opensvai_json(input_json)
for video_seq_id in range(videos_number):
video_json_data = videos_json_data[video_seq_id]
video_path, video_name = read_video_data_opensvai_json(video_json_data)
image_folder = os.path.join(posetrack_dataset_path, video_path)
visualize_folder = os.path.join(visualize_root_folder, video_name)
output_video_path = os.path.join(output_video_folder, "{}_out.mp4".format(video_name))
output_json_path = os.path.join(output_json_folder, "{}.json".format(video_name))
create_folder(visualize_folder)
frame_number += len(video_json_data)
light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,
video_json_data)
logger.info("videos_number:{}".format(videos_number))
logger.info("frames_number:{}".format(frame_number))
""" The PoseTrack2017 validation set """
if val:
input_jsons_folder = "/media/D/DataSet/PoseTrack2017/val/"
posetrack_dataset_path = "/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data"
val_jsons = os.listdir(input_jsons_folder)
frame_number = 0
videos_number = len(val_jsons)
for json in val_jsons:
videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))
assert len(videos_json_data) == 1
video_json_data = videos_json_data[0]
video_path, video_name = read_video_data_opensvai_json(video_json_data)
image_folder = os.path.join(posetrack_dataset_path, video_path)
visualize_folder = os.path.join(visualize_root_folder, video_name)
output_video_path = os.path.join(output_video_folder, "{}_out.mp4".format(video_name))
output_json_path = os.path.join(output_json_folder, "{}.json".format(video_name))
create_folder(visualize_folder)
frame_number += len(video_json_data)
light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,
video_json_data)
logger.info("videos_number:{}".format(videos_number))
logger.info("frames_number:{}".format(frame_number))
""" The PoseTrack2017 test set """
if test:
input_jsons_folder = "/media/D/DataSet/PoseTrack2017/test/"
posetrack_dataset_path = "/media/D/DataSet/PoseTrack/PoseTrack2017/posetrack_data"
val_jsons = os.listdir(input_jsons_folder)
frame_number = 0
videos_number = len(val_jsons)
for json in val_jsons:
videos_json_data, _ = read_opensvai_json(os.path.join(input_jsons_folder, json))
assert len(videos_json_data) == 1
video_json_data = videos_json_data['annolist']
video_path, video_name = read_video_data_opensvai_json(video_json_data)
image_folder = os.path.join(posetrack_dataset_path, video_path)
visualize_folder = os.path.join(visualize_root_folder, video_name)
output_video_path = os.path.join(output_video_folder, "{}_out.mp4".format(video_name))
output_json_path = os.path.join(output_json_folder, "{}.json".format(video_name))
create_folder(visualize_folder)
frame_number += len(video_json_data)
light_track(pose_estimator, image_folder, output_json_path, visualize_folder, output_video_path,
video_json_data)
logger.info("videos_number:{}".format(videos_number))
logger.info("frames_number:{}".format(frame_number))
''' Display statistics '''
logger.info("total_time_ALL: {:.2f}s".format(total_time_ALL))
logger.info("total_time_DET: {:.2f}s".format(total_time_DET))
logger.info("total_time_POSE_ESTIMATOR: {:.2f}s".format(total_time_POSE_ESTIMATOR))
logger.info("total_time_POSE_SIMILARITY: {:.2f}s".format(total_time_POSE_SIMILARITY))
logger.info("total_time_ASSOCIATE: {:.2f}s".format(total_time_ASSOCIATE))
logger.info("total_time_LIGHTTRACK: {:.2f}s".format(
total_time_ALL - total_time_DET - total_time_POSE_ESTIMATOR - total_time_POSE_SIMILARITY - total_time_ASSOCIATE))
logger.info("filter_bbox_number:{}".format(filter_bbox_number))
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/net/http/server.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP server. See RFC 2616.
package http
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/textproto"
"net/url"
"os"
"path"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/lex/httplex"
)
// Errors used by the HTTP server.
var (
// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
// when the HTTP method or response code does not permit a
// body.
ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
// ErrHijacked is returned by ResponseWriter.Write calls when
// the underlying connection has been hijacked using the
// Hijacker interfaced.
ErrHijacked = errors.New("http: connection has been hijacked")
// ErrContentLength is returned by ResponseWriter.Write calls
// when a Handler set a Content-Length response header with a
// declared size and then attempted to write more bytes than
// declared.
ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
// Deprecated: ErrWriteAfterFlush is no longer used.
ErrWriteAfterFlush = errors.New("unused")
)
// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
// Header returns the header map that will be sent by
// WriteHeader. Changing the header after a call to
// WriteHeader (or Write) has no effect unless the modified
// headers were declared as trailers by setting the
// "Trailer" header before the call to WriteHeader (see example).
// To suppress implicit response headers, set their value to nil.
Header() Header
// Write writes the data to the connection as part of an HTTP reply.
//
// If WriteHeader has not yet been called, Write calls
// WriteHeader(http.StatusOK) before writing the data. If the Header
// does not contain a Content-Type line, Write adds a Content-Type set
// to the result of passing the initial 512 bytes of written data to
// DetectContentType.
//
// Depending on the HTTP protocol version and the client, calling
// Write or WriteHeader may prevent future reads on the
// Request.Body. For HTTP/1.x requests, handlers should read any
// needed request body data before writing the response. Once the
// headers have been flushed (due to either an explicit Flusher.Flush
// call or writing enough data to trigger a flush), the request body
// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
// handlers to continue to read the request body while concurrently
// writing the response. However, such behavior may not be supported
// by all HTTP/2 clients. Handlers should read before writing if
// possible to maximize compatibility.
Write([]byte) (int, error)
// WriteHeader sends an HTTP response header with status code.
// If WriteHeader is not called explicitly, the first call to Write
// will trigger an implicit WriteHeader(http.StatusOK).
// Thus explicit calls to WriteHeader are mainly used to
// send error codes.
WriteHeader(int)
}
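// A minimal illustrative sketch of a handler using Header, WriteHeader and
// Write:
//
//	func hello(w ResponseWriter, r *Request) {
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.WriteHeader(StatusOK)
//		io.WriteString(w, "hello\n")
//	}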
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
// Flush sends any buffered data to the client.
Flush()
}
// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the HTTP server library
// will not do anything else with the connection.
//
// It becomes the caller's responsibility to manage
// and close the connection.
//
// The returned net.Conn may have read or write deadlines
// already set, depending on the configuration of the
// Server. It is the caller's responsibility to set
// or clear those deadlines as needed.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
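// A minimal illustrative sketch of a handler that takes over the connection:
//
//	func takeover(w ResponseWriter, r *Request) {
//		hj, ok := w.(Hijacker)
//		if !ok {
//			Error(w, "hijacking not supported", StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			Error(w, err.Error(), StatusInternalServerError)
//			return
//		}
//		defer conn.Close()
//		bufrw.WriteString("HTTP/1.1 101 Switching Protocols\r\n\r\n")
//		bufrw.Flush()
//	}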
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
//
// CloseNotify may wait to notify until Request.Body has been
// fully read.
//
// After the Handler has returned, there is no guarantee
// that the channel receives a value.
//
// If the protocol is HTTP/1.1 and CloseNotify is called while
// processing an idempotent request (such as a GET) while
// HTTP/1.1 pipelining is in use, the arrival of a subsequent
// pipelined request may cause a value to be sent on the
// returned channel. In practice HTTP/1.1 pipelining is not
// enabled in browsers and not seen often in the wild. If this
// is a problem, use HTTP/2 or only use CloseNotify on methods
// such as POST.
CloseNotify() <-chan bool
}
var (
// ServerContextKey is a context key. It can be used in HTTP
// handlers with context.WithValue to access the server that
// started the handler. The associated value will be of
// type *Server.
ServerContextKey = &contextKey{"http-server"}
// LocalAddrContextKey is a context key. It can be used in
// HTTP handlers with context.WithValue to access the local
// address the connection arrived on.
// The associated value will be of type net.Addr.
LocalAddrContextKey = &contextKey{"local-addr"}
)
// A conn represents the server side of an HTTP connection.
type conn struct {
// server is the server on which the connection arrived.
// Immutable; never nil.
server *Server
// rwc is the underlying network connection.
// This is never wrapped by other types and is the value given out
// to CloseNotifier callers. It is usually of type *net.TCPConn or
// *tls.Conn.
rwc net.Conn
// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
// inside the Listener's Accept goroutine, as some implementations block.
// It is populated immediately inside the (*conn).serve goroutine.
// This is the value of a Handler's (*Request).RemoteAddr.
remoteAddr string
// tlsState is the TLS connection state when using TLS.
// nil means not TLS.
tlsState *tls.ConnectionState
// werr is set to the first write error to rwc.
// It is set via checkConnErrorWriter{w}, where bufw writes.
werr error
// r is bufr's read source. It's a wrapper around rwc that provides
// io.LimitedReader-style limiting (while reading request headers)
// and functionality to support CloseNotifier. See *connReader docs.
r *connReader
// bufr reads from r.
// Users of bufr must hold mu.
bufr *bufio.Reader
// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
bufw *bufio.Writer
// lastMethod is the method of the most recent request
// on this connection, if any.
lastMethod string
// mu guards hijackedv, use of bufr, (*response).closeNotifyCh.
mu sync.Mutex
// hijackedv is whether this connection has been hijacked
// by a Handler with the Hijacker interface.
// It is guarded by mu.
hijackedv bool
}
func (c *conn) hijacked() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.hijackedv
}
// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if c.hijackedv {
return nil, nil, ErrHijacked
}
c.hijackedv = true
rwc = c.rwc
buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
c.setState(rwc, StateHijacked)
return
}
// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048
// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
res *response
// header is either nil or a deep clone of res.handlerHeader
// at the time of res.WriteHeader, if res.WriteHeader is
// called and extra buffering is being done to calculate
// Content-Type and/or Content-Length.
header Header
// wroteHeader tells whether the header's been written to "the
// wire" (or rather: w.conn.buf). this is unlike
// (*response).wroteHeader, which tells only whether it was
// logically written.
wroteHeader bool
// set by the writeHeader method:
chunking bool // using chunked transfer encoding for reply body
}
var (
crlf = []byte("\r\n")
colonSpace = []byte(": ")
)
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
if !cw.wroteHeader {
cw.writeHeader(p)
}
if cw.res.req.Method == "HEAD" {
// Eat writes.
return len(p), nil
}
if cw.chunking {
_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
if err != nil {
cw.res.conn.rwc.Close()
return
}
}
n, err = cw.res.conn.bufw.Write(p)
if cw.chunking && err == nil {
_, err = cw.res.conn.bufw.Write(crlf)
}
if err != nil {
cw.res.conn.rwc.Close()
}
return
}
func (cw *chunkWriter) flush() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
cw.res.conn.bufw.Flush()
}
func (cw *chunkWriter) close() {
if !cw.wroteHeader {
cw.writeHeader(nil)
}
if cw.chunking {
bw := cw.res.conn.bufw // conn's bufio writer
// zero chunk to mark EOF
bw.WriteString("0\r\n")
if len(cw.res.trailers) > 0 {
trailers := make(Header)
for _, h := range cw.res.trailers {
if vv := cw.res.handlerHeader[h]; len(vv) > 0 {
trailers[h] = vv
}
}
trailers.Write(bw) // the writer handles noting errors
}
// final blank line after the trailers (whether
// present or not)
bw.WriteString("\r\n")
}
}
// A response represents the server side of an HTTP response.
type response struct {
conn *conn
req *Request // request for this response
reqBody io.ReadCloser
cancelCtx context.CancelFunc // when ServeHTTP exits
wroteHeader bool // reply header has been (logically) written
wroteContinue bool // 100 Continue response was written
wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
wantsClose bool // HTTP request has Connection "close"
w *bufio.Writer // buffers output in chunks to chunkWriter
cw chunkWriter
// handlerHeader is the Header that Handlers get access to,
// which may be retained and mutated even after WriteHeader.
// handlerHeader is copied into cw.header at WriteHeader
// time, and privately mutated thereafter.
handlerHeader Header
calledHeader bool // handler accessed handlerHeader via Header
written int64 // number of bytes written in body
contentLength int64 // explicitly-declared Content-Length; or -1
status int // status code passed to WriteHeader
// close connection after this reply. set on request and
// updated after response from handler if there's a
// "Connection: keep-alive" response header and a
// Content-Length.
closeAfterReply bool
// requestBodyLimitHit is set by requestTooLarge when
// maxBytesReader hits its max size. It is checked in
// WriteHeader, to make sure we don't consume the
// remaining request body to try to advance to the next HTTP
// request. Instead, when this is set, we stop reading
// subsequent requests on this connection and stop reading
// input from it.
requestBodyLimitHit bool
// trailers are the headers to be sent after the handler
// finishes writing the body. This field is initialized from
// the Trailer response header when the response header is
// written.
trailers []string
handlerDone atomicBool // set true when the handler exits
// Buffers for Date and Content-Length
dateBuf [len(TimeFormat)]byte
clenBuf [10]byte
// closeNotifyCh is non-nil once CloseNotify is called.
// Guarded by conn.mu
closeNotifyCh <-chan bool
}
type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
k = CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Content-Length", "Trailer":
// Forbidden by RFC 2616 14.40.
return
}
w.trailers = append(w.trailers, k)
}
// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
w.closeAfterReply = true
w.requestBodyLimitHit = true
if !w.wroteHeader {
w.Header().Set("Connection", "close")
}
}
// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
_, haveType := w.handlerHeader["Content-Type"]
return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}
// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
io.Writer
}
func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
switch v := src.(type) {
case *os.File:
fi, err := v.Stat()
if err != nil {
return false, err
}
return fi.Mode().IsRegular(), nil
case *io.LimitedReader:
return srcIsRegularFile(v.R)
default:
return
}
}
// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
// Our underlying w.conn.rwc is usually a *TCPConn (with its
// own ReadFrom method). If not, or if our src isn't a regular
// file, just fall back to the normal copy method.
rf, ok := w.conn.rwc.(io.ReaderFrom)
regFile, err := srcIsRegularFile(src)
if err != nil {
return 0, err
}
if !ok || !regFile {
bufp := copyBufPool.Get().(*[]byte)
defer copyBufPool.Put(bufp)
return io.CopyBuffer(writerOnly{w}, src, *bufp)
}
// sendfile path:
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if w.needsSniff() {
n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
n += n0
if err != nil {
return n, err
}
}
w.w.Flush() // get rid of any previous writes
w.cw.flush() // make sure Header is written; flush data to rwc
// Now that cw has been flushed, its chunking field is guaranteed initialized.
if !w.cw.chunking && w.bodyAllowed() {
n0, err := rf.ReadFrom(src)
n += n0
w.written += n0
return n, err
}
n0, err := io.Copy(writerOnly{w}, src)
n += n0
return n, err
}
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false
// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
c := &conn{
server: srv,
rwc: rwc,
}
if debugServerConnections {
c.rwc = newLoggingConn("server", c.rwc)
}
return c
}
type readResult struct {
n int
err error
b byte // byte read, if n == 1
}
// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
r io.Reader
remain int64 // bytes remaining
// ch is non-nil if a background read is in progress.
// It is guarded by conn.mu.
ch chan readResult
}
func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
func (cr *connReader) Read(p []byte) (n int, err error) {
if cr.hitReadLimit() {
return 0, io.EOF
}
if len(p) == 0 {
return
}
if int64(len(p)) > cr.remain {
p = p[:cr.remain]
}
// Is a background read (started by CloseNotifier) already in
// flight? If so, wait for it and use its result.
ch := cr.ch
if ch != nil {
cr.ch = nil
res := <-ch
if res.n == 1 {
p[0] = res.b
cr.remain -= 1
}
return res.n, res.err
}
n, err = cr.r.Read(p)
cr.remain -= int64(n)
return
}
func (cr *connReader) startBackgroundRead(onReadComplete func()) {
if cr.ch != nil {
// Background read already started.
return
}
cr.ch = make(chan readResult, 1)
go cr.closeNotifyAwaitActivityRead(cr.ch, onReadComplete)
}
func (cr *connReader) closeNotifyAwaitActivityRead(ch chan<- readResult, onReadComplete func()) {
var buf [1]byte
n, err := cr.r.Read(buf[:1])
onReadComplete()
ch <- readResult{n, err, buf[0]}
}
var (
bufioReaderPool sync.Pool
bufioWriter2kPool sync.Pool
bufioWriter4kPool sync.Pool
)
var copyBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}
func bufioWriterPool(size int) *sync.Pool {
switch size {
case 2 << 10:
return &bufioWriter2kPool
case 4 << 10:
return &bufioWriter4kPool
}
return nil
}
func newBufioReader(r io.Reader) *bufio.Reader {
if v := bufioReaderPool.Get(); v != nil {
br := v.(*bufio.Reader)
br.Reset(r)
return br
}
// Note: if this reader size is ever changed, update
// TestHandlerBodyClose's assumptions.
return bufio.NewReader(r)
}
func putBufioReader(br *bufio.Reader) {
br.Reset(nil)
bufioReaderPool.Put(br)
}
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
pool := bufioWriterPool(size)
if pool != nil {
if v := pool.Get(); v != nil {
bw := v.(*bufio.Writer)
bw.Reset(w)
return bw
}
}
return bufio.NewWriterSize(w, size)
}
func putBufioWriter(bw *bufio.Writer) {
bw.Reset(nil)
if pool := bufioWriterPool(bw.Available()); pool != nil {
pool.Put(bw)
}
}
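// The pooling pattern used above, sketched in isolation (not part of the
// package): a sync.Pool hands out reset *bufio.Writer values so that
// per-connection buffers are reused instead of reallocated. The names
// writerPool, getWriter, and putWriter are hypothetical; assumes "bufio",
// "io", and "sync" imports.
//
//	var writerPool sync.Pool
//
//	func getWriter(w io.Writer) *bufio.Writer {
//		if v := writerPool.Get(); v != nil {
//			bw := v.(*bufio.Writer)
//			bw.Reset(w) // point the recycled buffer at the new destination
//			return bw
//		}
//		return bufio.NewWriterSize(w, 4<<10)
//	}
//
//	func putWriter(bw *bufio.Writer) {
//		bw.Reset(nil) // drop references so the old destination can be GC'd
//		writerPool.Put(bw)
//	}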
// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
func (srv *Server) maxHeaderBytes() int {
if srv.MaxHeaderBytes > 0 {
return srv.MaxHeaderBytes
}
return DefaultMaxHeaderBytes
}
func (srv *Server) initialReadLimitSize() int64 {
return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
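// A minimal configuration sketch (not part of the package): a Server that
// caps request headers at 64 KB instead of the 1 MB default. Per the serve
// loop below, a request whose headers exceed the limit is answered with
// "431 Request Header Fields Too Large". The address is illustrative;
// assumes "log" and "net/http" imports.
//
//	srv := &http.Server{
//		Addr:           ":8080",
//		MaxHeaderBytes: 64 << 10, // 64 KB
//	}
//	log.Fatal(srv.ListenAndServe())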
// wrapper around io.ReadCloser which, on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
resp *response
readCloser io.ReadCloser
closed bool
sawEOF bool
}
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
if ecr.closed {
return 0, ErrBodyReadAfterClose
}
if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
ecr.resp.wroteContinue = true
ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
ecr.resp.conn.bufw.Flush()
}
n, err = ecr.readCloser.Read(p)
if err == io.EOF {
ecr.sawEOF = true
}
return
}
func (ecr *expectContinueReader) Close() error {
ecr.closed = true
return ecr.readCloser.Close()
}
// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
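// A minimal usage sketch (not part of the package): producing and parsing an
// HTTP date. The time is converted to UTC before formatting, as required
// above; assumes "net/http" and "time" imports.
//
//	s := time.Now().UTC().Format(http.TimeFormat)
//	t, err := http.ParseTime(s) // ParseTime accepts TimeFormat among other layouts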
// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
const days = "SunMonTueWedThuFriSat"
const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
t = t.UTC()
yy, mm, dd := t.Date()
hh, mn, ss := t.Clock()
day := days[3*t.Weekday():]
mon := months[3*(mm-1):]
return append(b,
day[0], day[1], day[2], ',', ' ',
byte('0'+dd/10), byte('0'+dd%10), ' ',
mon[0], mon[1], mon[2], ' ',
byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
byte('0'+hh/10), byte('0'+hh%10), ':',
byte('0'+mn/10), byte('0'+mn%10), ':',
byte('0'+ss/10), byte('0'+ss%10), ' ',
'G', 'M', 'T')
}
var errTooLarge = errors.New("http: request too large")
// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
if c.hijacked() {
return nil, ErrHijacked
}
if d := c.server.ReadTimeout; d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
}
if d := c.server.WriteTimeout; d != 0 {
defer func() {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}()
}
c.r.setReadLimit(c.server.initialReadLimitSize())
c.mu.Lock() // while using bufr
if c.lastMethod == "POST" {
// RFC 2616 section 4.1 tolerance for old buggy clients.
peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
c.bufr.Discard(numLeadingCRorLF(peek))
}
req, err := readRequest(c.bufr, keepHostHeader)
c.mu.Unlock()
if err != nil {
if c.r.hitReadLimit() {
return nil, errTooLarge
}
return nil, err
}
ctx, cancelCtx := context.WithCancel(ctx)
req.ctx = ctx
c.lastMethod = req.Method
c.r.setInfiniteReadLimit()
hosts, haveHost := req.Header["Host"]
isH2Upgrade := req.isH2Upgrade()
if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade {
return nil, badRequestError("missing required Host header")
}
if len(hosts) > 1 {
return nil, badRequestError("too many Host headers")
}
if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) {
return nil, badRequestError("malformed Host header")
}
for k, vv := range req.Header {
if !httplex.ValidHeaderFieldName(k) {
return nil, badRequestError("invalid header name")
}
for _, v := range vv {
if !httplex.ValidHeaderFieldValue(v) {
return nil, badRequestError("invalid header value")
}
}
}
delete(req.Header, "Host")
req.RemoteAddr = c.remoteAddr
req.TLS = c.tlsState
if body, ok := req.Body.(*body); ok {
body.doEarlyClose = true
}
w = &response{
conn: c,
cancelCtx: cancelCtx,
req: req,
reqBody: req.Body,
handlerHeader: make(Header),
contentLength: -1,
// We populate these ahead of time so we're not
// reading from req.Header after their Handler starts
// and maybe mutates it (Issue 14940)
wants10KeepAlive: req.wantsHttp10KeepAlive(),
wantsClose: req.wantsClose(),
}
if isH2Upgrade {
w.closeAfterReply = true
}
w.cw.res = w
w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
return w, nil
}
func (w *response) Header() Header {
if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
// Accessing the header between logically writing it
// and physically writing it means we need to allocate
// a clone to snapshot the logically written state.
w.cw.header = w.handlerHeader.clone()
}
w.calledHeader = true
return w.handlerHeader
}
// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, then the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10
func (w *response) WriteHeader(code int) {
if w.conn.hijacked() {
w.conn.server.logf("http: response.WriteHeader on hijacked connection")
return
}
if w.wroteHeader {
w.conn.server.logf("http: multiple response.WriteHeader calls")
return
}
w.wroteHeader = true
w.status = code
if w.calledHeader && w.cw.header == nil {
w.cw.header = w.handlerHeader.clone()
}
if cl := w.handlerHeader.get("Content-Length"); cl != "" {
v, err := strconv.ParseInt(cl, 10, 64)
if err == nil && v >= 0 {
w.contentLength = v
} else {
w.conn.server.logf("http: invalid Content-Length of %q", cl)
w.handlerHeader.Del("Content-Length")
}
}
}
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
contentType string
connection string
transferEncoding string
date []byte // written if not nil
contentLength []byte // written if not nil
}
// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
[]byte("Content-Type"),
[]byte("Connection"),
[]byte("Transfer-Encoding"),
}
var (
headerContentLength = []byte("Content-Length: ")
headerDate = []byte("Date: ")
)
// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
if h.date != nil {
w.Write(headerDate)
w.Write(h.date)
w.Write(crlf)
}
if h.contentLength != nil {
w.Write(headerContentLength)
w.Write(h.contentLength)
w.Write(crlf)
}
for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
if v != "" {
w.Write(extraHeaderKeys[i])
w.Write(colonSpace)
w.WriteString(v)
w.Write(crlf)
}
}
}
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
if cw.wroteHeader {
return
}
cw.wroteHeader = true
w := cw.res
keepAlivesEnabled := w.conn.server.doKeepAlives()
isHEAD := w.req.Method == "HEAD"
// header is written out to w.conn.buf below. Depending on the
// state of the handler, we either own the map or not. If we
// don't own it, the exclude map is created lazily for
// WriteSubset to remove headers. The setHeader struct holds
// headers we need to add.
header := cw.header
owned := header != nil
if !owned {
header = w.handlerHeader
}
var excludeHeader map[string]bool
delHeader := func(key string) {
if owned {
header.Del(key)
return
}
if _, ok := header[key]; !ok {
return
}
if excludeHeader == nil {
excludeHeader = make(map[string]bool)
}
excludeHeader[key] = true
}
var setHeader extraHeader
trailers := false
for _, v := range cw.header["Trailer"] {
trailers = true
foreachHeaderElement(v, cw.res.declareTrailer)
}
te := header.get("Transfer-Encoding")
hasTE := te != ""
// If the handler is done but never sent a Content-Length
// response header and this is our first (and last) write, set
// it, even to zero. This helps HTTP/1.0 clients keep their
// "keep-alive" connections alive.
// Exceptions: 304/204/1xx responses never get Content-Length, and if
// it was a HEAD request, we don't know the difference between
// 0 actual bytes and 0 bytes because the handler noticed it
// was a HEAD request and chose not to write anything. So for
// HEAD, the handler should either write the Content-Length or
// write non-zero bytes. If it's actually 0 bytes and the
// handler never looked at the Request.Method, we just don't
// send a Content-Length header.
// Further, we don't send an automatic Content-Length if they
// set a Transfer-Encoding, because they're generally incompatible.
if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
w.contentLength = int64(len(p))
setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
}
// If this was an HTTP/1.0 request with keep-alive and we sent a
// Content-Length back, we can make this a keep-alive response ...
if w.wants10KeepAlive && keepAlivesEnabled {
sentLength := header.get("Content-Length") != ""
if sentLength && header.get("Connection") == "keep-alive" {
w.closeAfterReply = false
}
}
// Check for an explicit (and valid) Content-Length header.
hasCL := w.contentLength != -1
if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
_, connectionHeaderSet := header["Connection"]
if !connectionHeaderSet {
setHeader.connection = "keep-alive"
}
} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
w.closeAfterReply = true
}
if header.get("Connection") == "close" || !keepAlivesEnabled {
w.closeAfterReply = true
}
// If the client wanted a 100-continue but we never sent it to
// them (or, more strictly: we never finished reading their
// request body), don't reuse this connection because it's now
// in an unknown state: we might be sending this response at
// the same time the client is now sending its request body
// after a timeout. (Some HTTP clients send Expect:
// 100-continue but knowing that some servers don't support
// it, the clients set a timer and send the body later anyway)
// If we haven't seen EOF, we can't skip over the unread body
// because we don't know if the next bytes on the wire will be
// the body-following-the-timer or the subsequent request.
// See Issue 11549.
if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
w.closeAfterReply = true
}
// Per RFC 2616, we should consume the request body before
// replying, if the handler hasn't already done so. But we
// don't want to do an unbounded amount of reading here for
// DoS reasons, so we only try up to a threshold.
// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
// about HTTP/1.x Handlers concurrently reading and writing, like
// HTTP/2 handlers can do. Maybe this code should be relaxed?
if w.req.ContentLength != 0 && !w.closeAfterReply {
var discard, tooBig bool
switch bdy := w.req.Body.(type) {
case *expectContinueReader:
if bdy.resp.wroteContinue {
discard = true
}
case *body:
bdy.mu.Lock()
switch {
case bdy.closed:
if !bdy.sawEOF {
// Body was closed in handler with non-EOF error.
w.closeAfterReply = true
}
case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
tooBig = true
default:
discard = true
}
bdy.mu.Unlock()
default:
discard = true
}
if discard {
_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
switch err {
case nil:
// There must be even more data left over.
tooBig = true
case ErrBodyReadAfterClose:
// Body was already consumed and closed.
case io.EOF:
// The remaining body was just consumed, close it.
err = w.reqBody.Close()
if err != nil {
w.closeAfterReply = true
}
default:
// Some other kind of error occurred, like a read timeout, or
// corrupt chunked encoding. In any case, whatever remains
// on the wire must not be parsed as another HTTP request.
w.closeAfterReply = true
}
}
if tooBig {
w.requestTooLarge()
delHeader("Connection")
setHeader.connection = "close"
}
}
code := w.status
if bodyAllowedForStatus(code) {
// If no content type, apply sniffing algorithm to body.
_, haveType := header["Content-Type"]
if !haveType && !hasTE {
setHeader.contentType = DetectContentType(p)
}
} else {
for _, k := range suppressedHeaders(code) {
delHeader(k)
}
}
if _, ok := header["Date"]; !ok {
setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
}
if hasCL && hasTE && te != "identity" {
// TODO: return an error if WriteHeader gets a return parameter
// For now just ignore the Content-Length.
w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
te, w.contentLength)
delHeader("Content-Length")
hasCL = false
}
if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
// do nothing
} else if code == StatusNoContent {
delHeader("Transfer-Encoding")
} else if hasCL {
delHeader("Transfer-Encoding")
} else if w.req.ProtoAtLeast(1, 1) {
// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
// content-length has been provided. The connection must be closed after the
// reply is written, and no chunking is to be done. This is the setup
// recommended in the Server-Sent Events candidate recommendation 11,
// section 8.
if hasTE && te == "identity" {
cw.chunking = false
w.closeAfterReply = true
} else {
// HTTP/1.1 or greater: use chunked transfer encoding
// to avoid closing the connection at EOF.
cw.chunking = true
setHeader.transferEncoding = "chunked"
}
} else {
// HTTP version < 1.1: cannot do chunked transfer
// encoding and we don't know the Content-Length so
// signal EOF by closing connection.
w.closeAfterReply = true
delHeader("Transfer-Encoding") // in case already set
}
// Cannot use Content-Length with non-identity Transfer-Encoding.
if cw.chunking {
delHeader("Content-Length")
}
if !w.req.ProtoAtLeast(1, 0) {
return
}
if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
delHeader("Connection")
if w.req.ProtoAtLeast(1, 1) {
setHeader.connection = "close"
}
}
w.conn.bufw.WriteString(statusLine(w.req, code))
cw.header.WriteSubset(w.conn.bufw, excludeHeader)
setHeader.Write(w.conn.bufw)
w.conn.bufw.Write(crlf)
}
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
// statusLines is a cache of Status-Line strings, keyed by code (for
// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
// map keyed by struct of two fields. This map's max size is bounded
// by 2*len(statusText), two protocol types for each known official
// status code in the statusText map.
var (
statusMu sync.RWMutex
statusLines = make(map[int]string)
)
// statusLine returns a response Status-Line (RFC 2616 Section 6.1)
// for the given request and response status code.
func statusLine(req *Request, code int) string {
// Fast path:
key := code
proto11 := req.ProtoAtLeast(1, 1)
if !proto11 {
key = -key
}
statusMu.RLock()
line, ok := statusLines[key]
statusMu.RUnlock()
if ok {
return line
}
// Slow path:
proto := "HTTP/1.0"
if proto11 {
proto = "HTTP/1.1"
}
codestring := fmt.Sprintf("%03d", code)
text, ok := statusText[code]
if !ok {
text = "status code " + codestring
}
line = proto + " " + codestring + " " + text + "\r\n"
if ok {
statusMu.Lock()
defer statusMu.Unlock()
statusLines[key] = line
}
return line
}
// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
if !w.wroteHeader {
panic("")
}
return bodyAllowedForStatus(w.status)
}
// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Write (whose writeHeader finalizes Content-Length/Type)
// and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
// and populates c.werr with it if so, but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
return w.write(len(data), data, "")
}
func (w *response) WriteString(data string) (n int, err error) {
return w.write(len(data), nil, data)
}
// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
if w.conn.hijacked() {
w.conn.server.logf("http: response.Write on hijacked connection")
return 0, ErrHijacked
}
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
if lenData == 0 {
return 0, nil
}
if !w.bodyAllowed() {
return 0, ErrBodyNotAllowed
}
w.written += int64(lenData) // ignoring errors, for errorKludge
if w.contentLength != -1 && w.written > w.contentLength {
return 0, ErrContentLength
}
if dataB != nil {
return w.w.Write(dataB)
} else {
return w.w.WriteString(dataS)
}
}
func (w *response) finishRequest() {
w.handlerDone.setTrue()
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
putBufioWriter(w.w)
w.cw.close()
w.conn.bufw.Flush()
// Close the body (regardless of w.closeAfterReply) so we can
// re-use its bufio.Reader later safely.
w.reqBody.Close()
if w.req.MultipartForm != nil {
w.req.MultipartForm.RemoveAll()
}
}
// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
if w.closeAfterReply {
// The request or something set while executing the
// handler indicated we shouldn't reuse this
// connection.
return false
}
if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
// Did not write enough. Avoid getting out of sync.
return false
}
// There was some error writing to the underlying connection
// during the request, so don't re-use this conn.
if w.conn.werr != nil {
return false
}
if w.closedRequestBodyEarly() {
return false
}
return true
}
func (w *response) closedRequestBodyEarly() bool {
body, ok := w.req.Body.(*body)
return ok && body.didEarlyClose()
}
func (w *response) Flush() {
if !w.wroteHeader {
w.WriteHeader(StatusOK)
}
w.w.Flush()
w.cw.flush()
}
func (c *conn) finalFlush() {
if c.bufr != nil {
// Steal the bufio.Reader (~4KB worth of memory) and its associated
// reader for a future connection.
putBufioReader(c.bufr)
c.bufr = nil
}
if c.bufw != nil {
c.bufw.Flush()
// Steal the bufio.Writer (~4KB worth of memory) and its associated
// writer for a future connection.
putBufioWriter(c.bufw)
c.bufw = nil
}
}
// Close the connection.
func (c *conn) close() {
c.finalFlush()
c.rwc.Close()
}
// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond
type closeWriter interface {
CloseWrite() error
}
var _ closeWriter = (*net.TCPConn)(nil)
// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
c.finalFlush()
if tcp, ok := c.rwc.(closeWriter); ok {
tcp.CloseWrite()
}
time.Sleep(rstAvoidanceDelay)
}
// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
switch proto {
case "", "http/1.1", "http/1.0":
return false
}
return true
}
func (c *conn) setState(nc net.Conn, state ConnState) {
if hook := c.server.ConnState; hook != nil {
hook(nc, state)
}
}
// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string
func (e badRequestError) Error() string { return "Bad Request: " + string(e) }
// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
c.remoteAddr = c.rwc.RemoteAddr().String()
defer func() {
if err := recover(); err != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
}
if !c.hijacked() {
c.close()
c.setState(c.rwc, StateClosed)
}
}()
if tlsConn, ok := c.rwc.(*tls.Conn); ok {
if d := c.server.ReadTimeout; d != 0 {
c.rwc.SetReadDeadline(time.Now().Add(d))
}
if d := c.server.WriteTimeout; d != 0 {
c.rwc.SetWriteDeadline(time.Now().Add(d))
}
if err := tlsConn.Handshake(); err != nil {
c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
return
}
c.tlsState = new(tls.ConnectionState)
*c.tlsState = tlsConn.ConnectionState()
if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
if fn := c.server.TLSNextProto[proto]; fn != nil {
h := initNPNRequest{tlsConn, serverHandler{c.server}}
fn(c.server, tlsConn, h)
}
return
}
}
// HTTP/1.x from here on.
c.r = &connReader{r: c.rwc}
c.bufr = newBufioReader(c.r)
c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
ctx, cancelCtx := context.WithCancel(ctx)
defer cancelCtx()
for {
w, err := c.readRequest(ctx)
if c.r.remain != c.server.initialReadLimitSize() {
// If we read any bytes off the wire, we're active.
c.setState(c.rwc, StateActive)
}
if err != nil {
if err == errTooLarge {
// Their HTTP client may or may not be
// able to read this if we're
// responding to them and hanging up
// while they're still writing their
// request. Undefined behavior.
io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large")
c.closeWriteAndWait()
return
}
if err == io.EOF {
return // don't reply
}
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
return // don't reply
}
var publicErr string
if v, ok := err.(badRequestError); ok {
publicErr = ": " + string(v)
}
io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr)
return
}
// Expect 100 Continue support
req := w.req
if req.expectsContinue() {
if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
// Wrap the Body reader with one that replies on the connection
req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
}
} else if req.Header.get("Expect") != "" {
w.sendExpectationFailed()
return
}
// HTTP cannot have multiple simultaneous active requests.[*]
// Until the server replies to this request, it can't read another,
// so we might as well run the handler in this goroutine.
// [*] Not strictly true: HTTP pipelining. We could let them all process
// in parallel even if their responses need to be serialized.
serverHandler{c.server}.ServeHTTP(w, w.req)
w.cancelCtx()
if c.hijacked() {
return
}
w.finishRequest()
if !w.shouldReuseConnection() {
if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
c.closeWriteAndWait()
}
return
}
c.setState(c.rwc, StateIdle)
}
}
func (w *response) sendExpectationFailed() {
// TODO(bradfitz): let ServeHTTP handlers handle
// requests with non-standard expectation[s]? Seems
// theoretical at best, and doesn't fit into the
// current ServeHTTP model anyway. We'd need to
// make the ResponseWriter an optional
// "ExpectReplier" interface or something.
//
// For now we'll just obey RFC 2616 14.20 which says
// "If a server receives a request containing an
// Expect field that includes an expectation-
// extension that it does not support, it MUST
// respond with a 417 (Expectation Failed) status."
w.Header().Set("Connection", "close")
w.WriteHeader(StatusExpectationFailed)
w.finishRequest()
}
// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
if w.handlerDone.isSet() {
panic("net/http: Hijack called after ServeHTTP finished")
}
if w.wroteHeader {
w.cw.flush()
}
c := w.conn
c.mu.Lock()
defer c.mu.Unlock()
if w.closeNotifyCh != nil {
return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier in same ServeHTTP call")
}
// Release the bufioWriter that writes to the chunk writer, it is not
// used after a connection has been hijacked.
rwc, buf, err = c.hijackLocked()
if err == nil {
putBufioWriter(w.w)
w.w = nil
}
return rwc, buf, err
}
func (w *response) CloseNotify() <-chan bool {
if w.handlerDone.isSet() {
panic("net/http: CloseNotify called after ServeHTTP finished")
}
c := w.conn
c.mu.Lock()
defer c.mu.Unlock()
if w.closeNotifyCh != nil {
return w.closeNotifyCh
}
ch := make(chan bool, 1)
w.closeNotifyCh = ch
if w.conn.hijackedv {
// CloseNotify is undefined after a hijack, but we have
// no place to return an error, so just return a channel,
// even though it'll never receive a value.
return ch
}
var once sync.Once
notify := func() { once.Do(func() { ch <- true }) }
if requestBodyRemains(w.reqBody) {
// They're still consuming the request body, so we
// shouldn't notify yet.
registerOnHitEOF(w.reqBody, func() {
c.mu.Lock()
defer c.mu.Unlock()
startCloseNotifyBackgroundRead(c, notify)
})
} else {
startCloseNotifyBackgroundRead(c, notify)
}
return ch
}
// c.mu must be held.
func startCloseNotifyBackgroundRead(c *conn, notify func()) {
if c.bufr.Buffered() > 0 {
// They've consumed the request body, so anything
// remaining is a pipelined request, which we
// document as firing on.
notify()
} else {
c.r.startBackgroundRead(notify)
}
}
func registerOnHitEOF(rc io.ReadCloser, fn func()) {
switch v := rc.(type) {
case *expectContinueReader:
registerOnHitEOF(v.readCloser, fn)
case *body:
v.registerOnHitEOF(fn)
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
func requestBodyRemains(rc io.ReadCloser) bool {
if rc == eofReader {
return false
}
switch v := rc.(type) {
case *expectContinueReader:
return requestBodyRemains(v.readCloser)
case *body:
return v.bodyRemains()
default:
panic("unexpected type " + fmt.Sprintf("%T", rc))
}
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(ResponseWriter, *Request)
// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
f(w, r)
}
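// A minimal usage sketch (not part of the package): an ordinary function is
// adapted into a Handler with HandlerFunc and registered on a mux. The
// function and path are illustrative; assumes "io" and "net/http" imports.
//
//	hello := func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "hello\n")
//	}
//	mux := http.NewServeMux()
//	mux.Handle("/hello", http.HandlerFunc(hello))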
// Helper handlers
// Error replies to the request with the specified error message and HTTP code.
// It does not otherwise end the request; the caller should ensure no further
// writes are done to w.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(code)
fmt.Fprintln(w, error)
}
// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
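// A minimal usage sketch (not part of the package): replying with an error
// status from a handler via Error, and mounting the stock 404 handler on a
// retired subtree. The handler name, the "X-Token" header, and the paths are
// purely illustrative; assumes "io" and "net/http" imports.
//
//	func admin(w http.ResponseWriter, r *http.Request) {
//		if r.Header.Get("X-Token") == "" {
//			http.Error(w, "missing token", http.StatusForbidden)
//			return
//		}
//		io.WriteString(w, "welcome\n")
//	}
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/admin", admin)
//	mux.Handle("/old/", http.NotFoundHandler())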
// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
if prefix == "" {
return h
}
return HandlerFunc(func(w ResponseWriter, r *Request) {
if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
r.URL.Path = p
h.ServeHTTP(w, r)
} else {
NotFound(w, r)
}
})
}
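// A minimal usage sketch (not part of the package): serving files from a
// hypothetical ./static directory under the /assets/ URL prefix, so a request
// for /assets/logo.png reads ./static/logo.png. Assumes a "net/http" import.
//
//	fs := http.FileServer(http.Dir("./static"))
//	http.Handle("/assets/", http.StripPrefix("/assets/", fs))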
// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
if u, err := url.Parse(urlStr); err == nil {
// If url was relative, make absolute by
// combining with request path.
// The browser would probably do this for us,
// but doing it ourselves is more reliable.
// NOTE(rsc): RFC 2616 says that the Location
// line must be an absolute URI, like
// "http://www.google.com/redirect/",
// not a path like "/redirect/".
// Unfortunately, we don't know what to
// put in the host name section to get the
// client to connect to us again, so we can't
// know the right absolute URI to send back.
// Because of this problem, no one pays attention
// to the RFC; they all send back just a new path.
// So do we.
if u.Scheme == "" && u.Host == "" {
oldpath := r.URL.Path
if oldpath == "" { // should not happen, but avoid a crash if it does
oldpath = "/"
}
// no leading http://server
if urlStr == "" || urlStr[0] != '/' {
// make relative path absolute
olddir, _ := path.Split(oldpath)
urlStr = olddir + urlStr
}
var query string
if i := strings.Index(urlStr, "?"); i != -1 {
urlStr, query = urlStr[:i], urlStr[i:]
}
// clean up but preserve trailing slash
trailing := strings.HasSuffix(urlStr, "/")
urlStr = path.Clean(urlStr)
if trailing && !strings.HasSuffix(urlStr, "/") {
urlStr += "/"
}
urlStr += query
}
}
w.Header().Set("Location", urlStr)
w.WriteHeader(code)
// RFC 2616 recommends that a short note "SHOULD" be included in the
// response because older user agents may not understand 301/307.
// Shouldn't send the response for POST or HEAD; that leaves GET.
if r.Method == "GET" {
note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
fmt.Fprintln(w, note)
}
}
var htmlReplacer = strings.NewReplacer(
"&", "&amp;",
"<", "&lt;",
">", "&gt;",
// "&#34;" is shorter than "&quot;".
`"`, "&#34;",
// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
"'", "&#39;",
)
func htmlEscape(s string) string {
return htmlReplacer.Replace(s)
}
// Redirect to a fixed URL
type redirectHandler struct {
url string
code int
}
func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
Redirect(w, r, rh.url, rh.code)
}
// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
return &redirectHandler{url, code}
}
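// A minimal usage sketch (not part of the package): issuing a redirect from
// inside a handler with Redirect, and mounting a fixed redirect with
// RedirectHandler. The handler name and paths are illustrative; assumes a
// "net/http" import.
//
//	func login(w http.ResponseWriter, r *http.Request) {
//		http.Redirect(w, r, "/new-login", http.StatusFound)
//	}
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/login", login)
//	mux.Handle("/old-docs/", http.RedirectHandler("/docs/", http.StatusMovedPermanently))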
// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// If a subtree has been registered and a request is received naming the
// subtree root without its trailing slash, ServeMux redirects that
// request to the subtree root (adding the trailing slash). This behavior can
// be overridden with a separate registration for the path without
// the trailing slash. For example, registering "/images/" causes ServeMux
// to redirect a request for "/images" to "/images/", unless "/images" has
// been registered separately.
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path,
// redirecting any request containing . or .. elements or repeated slashes
// to an equivalent, cleaner URL.
type ServeMux struct {
mu sync.RWMutex
m map[string]muxEntry
hosts bool // whether any patterns contain hostnames
}
type muxEntry struct {
explicit bool
h Handler
pattern string
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return new(ServeMux) }
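// A minimal usage sketch (not part of the package): an exact path, a rooted
// subtree, and a host-specific pattern registered on one mux. The handlers
// serveIcon, serveImage, and serveAPI and the host name are hypothetical;
// assumes "log" and "net/http" imports.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/favicon.ico", serveIcon)    // fixed, rooted path
//	mux.HandleFunc("/images/", serveImage)       // rooted subtree
//	mux.HandleFunc("api.example.com/", serveAPI) // host-specific pattern
//	log.Fatal(http.ListenAndServe(":8080", mux))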
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = &defaultServeMux
var defaultServeMux ServeMux
// Does path match pattern?
func pathMatch(pattern, path string) bool {
if len(pattern) == 0 {
// should not happen
return false
}
n := len(pattern)
if pattern[n-1] != '/' {
return pattern == path
}
return len(path) >= n && path[0:n] == pattern
}
// Return the canonical path for p, eliminating . and .. elements.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
// Find a handler on a handler map given a path string
// Most-specific (longest) pattern wins
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
var n = 0
for k, v := range mux.m {
if !pathMatch(k, path) {
continue
}
if h == nil || len(k) > n {
n = len(k)
h = v.h
pattern = v.pattern
}
}
return
}
// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
if r.Method != "CONNECT" {
if p := cleanPath(r.URL.Path); p != r.URL.Path {
_, pattern = mux.handler(r.Host, p)
url := *r.URL
url.Path = p
return RedirectHandler(url.String(), StatusMovedPermanently), pattern
}
}
return mux.handler(r.Host, r.URL.Path)
}
// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
mux.mu.RLock()
defer mux.mu.RUnlock()
// Host-specific pattern takes precedence over generic ones
if mux.hosts {
h, pattern = mux.match(host + path)
}
if h == nil {
h, pattern = mux.match(path)
}
if h == nil {
h, pattern = NotFoundHandler(), ""
}
return
}
// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
if r.RequestURI == "*" {
if r.ProtoAtLeast(1, 1) {
w.Header().Set("Connection", "close")
}
w.WriteHeader(StatusBadRequest)
return
}
h, _ := mux.Handler(r)
h.ServeHTTP(w, r)
}
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.mu.Lock()
defer mux.mu.Unlock()
if pattern == "" {
panic("http: invalid pattern " + pattern)
}
if handler == nil {
panic("http: nil handler")
}
if mux.m[pattern].explicit {
panic("http: multiple registrations for " + pattern)
}
if mux.m == nil {
mux.m = make(map[string]muxEntry)
}
mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern}
if pattern[0] != '/' {
mux.hosts = true
}
// Helpful behavior:
// If pattern is /tree/, insert an implicit permanent redirect for /tree.
// It can be overridden by an explicit registration.
n := len(pattern)
if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit {
// If pattern contains a host name, strip it and use remaining
// path for redirect.
path := pattern
if pattern[0] != '/' {
// In pattern, at least the last character is a '/', so
// strings.Index can't be -1.
path = pattern[strings.Index(pattern, "/"):]
}
url := &url.URL{Path: path}
mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern}
}
}
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
// Handler is typically nil, in which case the DefaultServeMux is used.
func Serve(l net.Listener, handler Handler) error {
srv := &Server{Handler: handler}
return srv.Serve(l)
}
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
Addr string // TCP address to listen on, ":http" if empty
Handler Handler // handler to invoke, http.DefaultServeMux if nil
ReadTimeout time.Duration // maximum duration before timing out read of the request
WriteTimeout time.Duration // maximum duration before timing out write of the response
TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS
// MaxHeaderBytes controls the maximum number of bytes the
// server will read parsing the request header's keys and
// values, including the request line. It does not limit the
// size of the request body.
// If zero, DefaultMaxHeaderBytes is used.
MaxHeaderBytes int
// TLSNextProto optionally specifies a function to take over
// ownership of the provided TLS connection when an NPN/ALPN
// protocol upgrade has occurred. The map key is the protocol
// name negotiated. The Handler argument should be used to
// handle HTTP requests and will initialize the Request's TLS
// and RemoteAddr if not already set. The connection is
// automatically closed when the function returns.
// If TLSNextProto is nil, HTTP/2 support is enabled automatically.
TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
// ConnState specifies an optional callback function that is
// called when a client connection changes state. See the
// ConnState type and associated constants for details.
ConnState func(net.Conn, ConnState)
// ErrorLog specifies an optional logger for errors accepting
// connections and unexpected behavior from handlers.
// If nil, logging goes to os.Stderr via the log package's
// standard logger.
ErrorLog *log.Logger
disableKeepAlives int32 // accessed atomically.
nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve
nextProtoErr error
}
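// A minimal configuration sketch (not part of the package): a Server with
// explicit timeouts, a header-size limit, and an error logger. The field
// values, the mux handler, and the certificate file names are illustrative;
// assumes "log", "net/http", "os", and "time" imports.
//
//	srv := &http.Server{
//		Addr:           ":8443",
//		Handler:        mux, // some http.Handler
//		ReadTimeout:    10 * time.Second,
//		WriteTimeout:   10 * time.Second,
//		MaxHeaderBytes: 1 << 20,
//		ErrorLog:       log.New(os.Stderr, "http: ", log.LstdFlags),
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))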
// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int
const (
// StateNew represents a new connection that is expected to
// send a request immediately. Connections begin at this
// state and then transition to either StateActive or
// StateClosed.
StateNew ConnState = iota
// StateActive represents a connection that has read 1 or more
// bytes of a request. The Server.ConnState hook for
// StateActive fires before the request has entered a handler
// and doesn't fire again until the request has been
// handled. After the request is handled, the state
// transitions to StateClosed, StateHijacked, or StateIdle.
// For HTTP/2, StateActive fires on the transition from zero
// to one active request, and only transitions away once all
// active requests are complete. That means that ConnState
// cannot be used to do per-request work; ConnState only notes
// the overall state of the connection.
StateActive
// StateIdle represents a connection that has finished
// handling a request and is in the keep-alive state, waiting
// for a new request. Connections transition from StateIdle
// to either StateActive or StateClosed.
StateIdle
// StateHijacked represents a hijacked connection.
// This is a terminal state. It does not transition to StateClosed.
StateHijacked
// StateClosed represents a closed connection.
// This is a terminal state. Hijacked connections do not
// transition to StateClosed.
StateClosed
)
var stateName = map[ConnState]string{
StateNew: "new",
StateActive: "active",
StateIdle: "idle",
StateHijacked: "hijacked",
StateClosed: "closed",
}
func (c ConnState) String() string {
return stateName[c]
}
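// A minimal usage sketch (not part of the package): a ConnState hook that
// keeps a count of live connections. Hijacked connections never transition
// to StateClosed, so both terminal states decrement the counter. The counter
// variable is illustrative; assumes "net", "net/http", and "sync/atomic"
// imports.
//
//	var open int64
//	srv := &http.Server{
//		Addr: ":8080",
//		ConnState: func(c net.Conn, cs http.ConnState) {
//			switch cs {
//			case http.StateNew:
//				atomic.AddInt64(&open, 1)
//			case http.StateHijacked, http.StateClosed:
//				atomic.AddInt64(&open, -1)
//			}
//		},
//	}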
// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
srv *Server
}
func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
handler := sh.srv.Handler
if handler == nil {
handler = DefaultServeMux
}
if req.RequestURI == "*" && req.Method == "OPTIONS" {
handler = globalOptionsHandler{}
}
handler.ServeHTTP(rw, req)
}
// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
// If srv.Addr is blank, ":http" is used.
// ListenAndServe always returns a non-nil error.
func (srv *Server) ListenAndServe() error {
addr := srv.Addr
if addr == "" {
addr = ":http"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)})
}
var testHookServerServe func(*Server, net.Listener) // used if non-nil
// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
// Serve always returns a non-nil error.
func (srv *Server) Serve(l net.Listener) error {
defer l.Close()
if fn := testHookServerServe; fn != nil {
fn(srv, l)
}
var tempDelay time.Duration // how long to sleep on accept failure
if err := srv.setupHTTP2(); err != nil {
return err
}
// TODO: allow changing base context? can't imagine concrete
// use cases yet.
baseCtx := context.Background()
ctx := context.WithValue(baseCtx, ServerContextKey, srv)
ctx = context.WithValue(ctx, LocalAddrContextKey, l.Addr())
for {
rw, e := l.Accept()
if e != nil {
if ne, ok := e.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
time.Sleep(tempDelay)
continue
}
return e
}
tempDelay = 0
c := srv.newConn(rw)
c.setState(c.rwc, StateNew) // before Serve can return
go c.serve(ctx)
}
}
func (s *Server) doKeepAlives() bool {
return atomic.LoadInt32(&s.disableKeepAlives) == 0
}
// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
if v {
atomic.StoreInt32(&srv.disableKeepAlives, 0)
} else {
atomic.StoreInt32(&srv.disableKeepAlives, 1)
}
}
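// drainKeepAlivesExample is an illustrative sketch, not part of the original
// file: per the comment above, a server that is shutting down can disable
// keep-alives so idle connections close after their in-flight request, then
// stop accepting new connections. The listener handling here is an assumption.
func drainKeepAlivesExample(srv *Server, ln net.Listener) {
	srv.SetKeepAlivesEnabled(false) // responses now advertise Connection: close
	ln.Close()                      // no new connections are accepted
}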
func (s *Server) logf(format string, args ...interface{}) {
if s.ErrorLog != nil {
s.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// ListenAndServe listens on the TCP network address addr
// and then calls Serve with handler to handle requests
// on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
// Handler is typically nil, in which case the DefaultServeMux is
// used.
//
// A trivial example server is:
//
// package main
//
// import (
// "io"
// "net/http"
// "log"
// )
//
// // hello world, the web server
// func HelloServer(w http.ResponseWriter, req *http.Request) {
// io.WriteString(w, "hello, world!\n")
// }
//
// func main() {
// http.HandleFunc("/hello", HelloServer)
// log.Fatal(http.ListenAndServe(":12345", nil))
// }
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// A trivial example server is:
//
// import (
// "log"
// "net/http"
// )
//
// func handler(w http.ResponseWriter, req *http.Request) {
// w.Header().Set("Content-Type", "text/plain")
// w.Write([]byte("This is an example server.\n"))
// }
//
// func main() {
// http.HandleFunc("/", handler)
// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
// log.Fatal(err)
// }
//
// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
//
// ListenAndServeTLS always returns a non-nil error.
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
server := &Server{Addr: addr, Handler: handler}
return server.ListenAndServeTLS(certFile, keyFile)
}
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls Serve to handle requests on incoming TLS connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is
// signed by a certificate authority, the certFile should be the
// concatenation of the server's certificate, any intermediates, and
// the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
//
// ListenAndServeTLS always returns a non-nil error.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
addr := srv.Addr
if addr == "" {
addr = ":https"
}
// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
// before we clone it and create the TLS Listener.
if err := srv.setupHTTP2(); err != nil {
return err
}
config := cloneTLSConfig(srv.TLSConfig)
if !strSliceContains(config.NextProtos, "http/1.1") {
config.NextProtos = append(config.NextProtos, "http/1.1")
}
configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
if !configHasCert || certFile != "" || keyFile != "" {
var err error
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
return srv.Serve(tlsListener)
}
func (srv *Server) setupHTTP2() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
return srv.nextProtoErr
}
// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise. (by setting srv.TLSNextProto non-nil)
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2).
func (srv *Server) onceSetNextProtoDefaults() {
if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
return
}
// Enable HTTP/2 by default if the user hasn't otherwise
// configured their TLSNextProto map.
if srv.TLSNextProto == nil {
srv.nextProtoErr = http2ConfigureServer(srv, nil)
}
}
// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler buffers all Handler writes to memory and does not
// support the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
return &timeoutHandler{
handler: h,
body: msg,
dt: dt,
}
}
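// timeoutHandlerExample is an illustrative sketch, not part of the original
// file: it wraps a deliberately slow handler with the TimeoutHandler described
// above, so a client receives a 503 with the given message after one second,
// and the handler's later writes fail with ErrHandlerTimeout. The duration and
// message are assumptions for the example.
func timeoutHandlerExample() Handler {
	slow := HandlerFunc(func(w ResponseWriter, r *Request) {
		time.Sleep(2 * time.Second) // stand-in for slow work
		io.WriteString(w, "done")   // returns ErrHandlerTimeout once timed out
	})
	return TimeoutHandler(slow, 1*time.Second, "request timed out")
}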
// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")
type timeoutHandler struct {
handler Handler
body string
dt time.Duration
// When set, no timer will be created and this channel will
// be used instead.
testTimeout <-chan time.Time
}
func (h *timeoutHandler) errorBody() string {
if h.body != "" {
return h.body
}
return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
var t *time.Timer
timeout := h.testTimeout
if timeout == nil {
t = time.NewTimer(h.dt)
timeout = t.C
}
done := make(chan struct{})
tw := &timeoutWriter{
w: w,
h: make(Header),
}
go func() {
h.handler.ServeHTTP(tw, r)
close(done)
}()
select {
case <-done:
tw.mu.Lock()
defer tw.mu.Unlock()
dst := w.Header()
for k, vv := range tw.h {
dst[k] = vv
}
w.WriteHeader(tw.code)
w.Write(tw.wbuf.Bytes())
if t != nil {
t.Stop()
}
case <-timeout:
tw.mu.Lock()
defer tw.mu.Unlock()
w.WriteHeader(StatusServiceUnavailable)
io.WriteString(w, h.errorBody())
tw.timedOut = true
return
}
}
type timeoutWriter struct {
w ResponseWriter
h Header
wbuf bytes.Buffer
mu sync.Mutex
timedOut bool
wroteHeader bool
code int
}
func (tw *timeoutWriter) Header() Header { return tw.h }
func (tw *timeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return 0, ErrHandlerTimeout
}
if !tw.wroteHeader {
tw.writeHeader(StatusOK)
}
return tw.wbuf.Write(p)
}
func (tw *timeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader {
return
}
tw.writeHeader(code)
}
func (tw *timeoutWriter) writeHeader(code int) {
tw.wroteHeader = true
tw.code = code
}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}
func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", "0")
if r.ContentLength != 0 {
// Read up to 4KB of OPTIONS body (as mentioned in the
// spec as being reserved for future use), but anything
// over that is considered a waste of server resources
// (or an attack) and we abort and close the connection,
// courtesy of MaxBytesReader's EOF behavior.
mb := MaxBytesReader(w, r.Body, 4<<10)
io.Copy(ioutil.Discard, mb)
}
}
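// boundedBodyHandlerExample is an illustrative sketch, not part of the
// original file: it reuses the defensive pattern from globalOptionsHandler
// above, capping how much of a request body the server is willing to read.
// The 1 MB limit and the responses are assumptions for the example.
func boundedBodyHandlerExample(w ResponseWriter, r *Request) {
	body := MaxBytesReader(w, r.Body, 1<<20) // reads past the cap return an error
	if _, err := io.Copy(ioutil.Discard, body); err != nil {
		Error(w, "request body too large", StatusRequestEntityTooLarge)
		return
	}
	w.WriteHeader(StatusNoContent)
}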
type eofReaderWithWriteTo struct{}
func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil }
func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF }
// eofReader is a non-nil io.ReadCloser that always returns EOF.
// It has a WriteTo method so io.Copy won't need a buffer.
var eofReader = &struct {
eofReaderWithWriteTo
io.Closer
}{
eofReaderWithWriteTo{},
ioutil.NopCloser(nil),
}
// Verify that an io.Copy from an eofReader won't require a buffer.
var _ io.WriterTo = eofReader
// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
type initNPNRequest struct {
c *tls.Conn
h serverHandler
}
func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
if req.TLS == nil {
req.TLS = &tls.ConnectionState{}
*req.TLS = h.c.ConnectionState()
}
if req.Body == nil {
req.Body = eofReader
}
if req.RemoteAddr == "" {
req.RemoteAddr = h.c.RemoteAddr().String()
}
h.h.ServeHTTP(rw, req)
}
// loggingConn is used for debugging.
type loggingConn struct {
name string
net.Conn
}
var (
uniqNameMu sync.Mutex
uniqNameNext = make(map[string]int)
)
func newLoggingConn(baseName string, c net.Conn) net.Conn {
uniqNameMu.Lock()
defer uniqNameMu.Unlock()
uniqNameNext[baseName]++
return &loggingConn{
name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
Conn: c,
}
}
func (c *loggingConn) Write(p []byte) (n int, err error) {
log.Printf("%s.Write(%d) = ....", c.name, len(p))
n, err = c.Conn.Write(p)
log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Read(p []byte) (n int, err error) {
log.Printf("%s.Read(%d) = ....", c.name, len(p))
n, err = c.Conn.Read(p)
log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
return
}
func (c *loggingConn) Close() (err error) {
log.Printf("%s.Close() = ...", c.name)
err = c.Conn.Close()
log.Printf("%s.Close() = %v", c.name, err)
return
}
// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
c *conn
}
func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
n, err = w.c.rwc.Write(p)
if err != nil && w.c.werr == nil {
w.c.werr = err
}
return
}
func numLeadingCRorLF(v []byte) (n int) {
for _, b := range v {
if b == '\r' || b == '\n' {
n++
continue
}
break
}
return
}
func strSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
| ["\"GODEBUG\""] | [] | ["GODEBUG"] | [] | ["GODEBUG"] | go | 1 | 0 | |
cmd/frontend/db/testing/db_testing.go | // Package testing provides database test helpers.
package testing
import (
"context"
"database/sql"
"fmt"
"hash/fnv"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"sync"
"testing"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db/dbconn"
"github.com/sourcegraph/sourcegraph/pkg/actor"
)
// MockHashPassword if non-nil is used instead of db.hashPassword. This is useful
// when running tests since we can use a faster implementation.
var MockHashPassword func(password string) (sql.NullString, error)
var MockValidPassword func(hash, password string) bool
func useFastPasswordMocks() {
// We don't care about security in tests; we care about speed.
MockHashPassword = func(password string) (sql.NullString, error) {
h := fnv.New64()
io.WriteString(h, password)
return sql.NullString{Valid: true, String: strconv.FormatUint(h.Sum64(), 16)}, nil
}
MockValidPassword = func(hash, password string) bool {
h := fnv.New64()
io.WriteString(h, password)
return hash == strconv.FormatUint(h.Sum64(), 16)
}
}
// BeforeTest functions are called before each test is run (by TestContext).
var BeforeTest []func()
// DBNameSuffix must be set by DB test packages at init time to a value that is unique among all
// other such values used by other DB test packages. This is necessary to ensure the tests do not
// concurrently use the same DB (which would cause test failures).
var DBNameSuffix = "db"
var (
connectOnce sync.Once
connectErr error
)
// TestContext constructs a new context that holds a temporary test DB
// handle and other test configuration.
//
// Callers (other than github.com/sourcegraph/sourcegraph/cmd/frontend/db) must set a name in this
// package's DBNameSuffix var that is unique among all other test packages that call TestContext, so
// that each package's tests run in separate DBs and do not conflict.
func TestContext(t *testing.T) context.Context {
useFastPasswordMocks()
if testing.Short() {
t.Skip()
}
connectOnce.Do(func() {
connectErr = initTest(DBNameSuffix)
})
if connectErr != nil {
// only ignore connection errors if not on CI
if os.Getenv("CI") == "" {
t.Skip("Could not connect to DB", connectErr)
}
t.Fatal("Could not connect to DB", connectErr)
}
ctx := context.Background()
ctx = actor.WithActor(ctx, &actor.Actor{UID: 1, Internal: true})
for _, f := range BeforeTest {
f()
}
if err := emptyDBPreserveSchema(dbconn.Global); err != nil {
log.Fatal(err)
}
return ctx
}
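// Illustrative sketch, not part of the original file: a DB test package that
// relies on TestContext sets DBNameSuffix once in an init func and then asks
// for a fresh context in every test. The package alias, suffix and test name
// below are assumptions for the example.
//
//	func init() { dbtesting.DBNameSuffix = "repos" }
//
//	func TestListRepos(t *testing.T) {
//		ctx := dbtesting.TestContext(t) // connects once, then truncates tables
//		// ... exercise storage code with ctx ...
//		_ = ctx
//	}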
func emptyDBPreserveSchema(d *sql.DB) error {
_, err := d.Exec(`SELECT * FROM schema_migrations`)
if err != nil {
return fmt.Errorf("Table schema_migrations not found: %v", err)
}
return truncateDB(d)
}
func truncateDB(d *sql.DB) error {
rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND table_name != 'schema_migrations'")
if err != nil {
return err
}
var tables []string
for rows.Next() {
var table string
rows.Scan(&table)
tables = append(tables, table)
}
if err := rows.Close(); err != nil {
return err
}
if err := rows.Err(); err != nil {
return err
}
log.Printf("Truncating all %d tables", len(tables))
_, err = d.Exec("TRUNCATE " + strings.Join(tables, ", ") + " RESTART IDENTITY")
return err
}
// initTest creates a test database, named with the given suffix
// (dropping it if it already exists), and configures this package to use it.
// It is called by integration tests (in a package init func) that need to use
// a real database.
func initTest(nameSuffix string) error {
dbname := "sourcegraph-test-" + nameSuffix
out, err := exec.Command("dropdb", "--if-exists", dbname).CombinedOutput()
if err != nil {
return errors.Errorf("dropdb --if-exists failed: %v\n%s", err, string(out))
}
out, err = exec.Command("createdb", dbname).CombinedOutput()
if err != nil {
return errors.Errorf("createdb failed: %v\n%s", err, string(out))
}
return dbconn.ConnectToDB("dbname=" + dbname)
}
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 | |
profiler/profiler.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
package profiler
import (
"errors"
"fmt"
"os"
"runtime"
"sync"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/internal"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
// outChannelSize specifies the size of the profile output channel.
const outChannelSize = 5
var (
mu sync.Mutex
activeProfiler *profiler
containerID = internal.ContainerID() // replaced in tests
)
// Start starts the profiler. It may return an error if an API key is not provided by means of
// the WithAPIKey option, or if a hostname is not found.
func Start(opts ...Option) error {
mu.Lock()
defer mu.Unlock()
if activeProfiler != nil {
activeProfiler.stop()
}
p, err := newProfiler(opts...)
if err != nil {
return err
}
activeProfiler = p
activeProfiler.run()
return nil
}
// Stop stops the profiler.
func Stop() {
mu.Lock()
if activeProfiler != nil {
activeProfiler.stop()
activeProfiler = nil
}
mu.Unlock()
}
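// Illustrative sketch, not part of the original file: typical use of this
// package is to start the profiler early in main and stop it on the way out.
// No options are passed here; a real deployment would pass the Option values
// (API key, profile types, and so on) defined elsewhere in this package.
//
//	func main() {
//		if err := profiler.Start(); err != nil {
//			log.Fatal(err)
//		}
//		defer profiler.Stop()
//		// ... run the application ...
//	}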
// profiler collects and sends preset profiles to the Datadog API at a given frequency
// using a given configuration.
type profiler struct {
cfg *config // profile configuration
out chan batch // upload queue
uploadFunc func(batch) error // defaults to (*profiler).upload; replaced in tests
exit chan struct{} // exit signals the profiler to stop; it is closed after stopping
stopOnce sync.Once // stopOnce ensures the profiler is stopped exactly once.
wg sync.WaitGroup // wg waits for all goroutines to exit when stopping.
met *metrics // metric collector state
}
// newProfiler creates a new, unstarted profiler.
func newProfiler(opts ...Option) (*profiler, error) {
cfg, err := defaultConfig()
if err != nil {
return nil, err
}
for _, opt := range opts {
opt(cfg)
}
// TODO(fg) remove this after making expGoroutineWaitProfile public.
if os.Getenv("DD_PROFILING_WAIT_PROFILE") != "" {
cfg.addProfileType(expGoroutineWaitProfile)
}
if cfg.apiKey != "" {
if !isAPIKeyValid(cfg.apiKey) {
return nil, errors.New("API key has incorrect format")
}
cfg.targetURL = cfg.apiURL
} else {
cfg.targetURL = cfg.agentURL
}
if cfg.hostname == "" {
hostname, err := os.Hostname()
if err != nil {
if cfg.targetURL == cfg.apiURL {
return nil, fmt.Errorf("could not obtain hostname: %v", err)
}
log.Warn("unable to look up hostname: %v", err)
}
cfg.hostname = hostname
}
// uploadTimeout defaults to DefaultUploadTimeout, but in theory a user might
// set it to 0 or a negative value. However, it's not clear what this should
// mean, and most meanings we could assign seem to be bad: Not having a
// timeout is dangerous, having a timeout that fires immediately breaks
// uploading, and silently defaulting to the default timeout is confusing.
// So let's just stay clear of all of this by not allowing such values.
//
// see similar discussion: https://github.com/golang/go/issues/39177
if cfg.uploadTimeout <= 0 {
return nil, fmt.Errorf("invalid upload timeout, must be > 0: %s", cfg.uploadTimeout)
}
p := profiler{
cfg: cfg,
out: make(chan batch, outChannelSize),
exit: make(chan struct{}),
met: newMetrics(),
}
p.uploadFunc = p.upload
return &p, nil
}
// run runs the profiler.
func (p *profiler) run() {
if _, ok := p.cfg.types[MutexProfile]; ok {
runtime.SetMutexProfileFraction(p.cfg.mutexFraction)
}
if _, ok := p.cfg.types[BlockProfile]; ok {
runtime.SetBlockProfileRate(p.cfg.blockRate)
}
p.wg.Add(1)
go func() {
defer p.wg.Done()
tick := time.NewTicker(p.cfg.period)
defer tick.Stop()
p.met.reset(now()) // collect baseline metrics at profiler start
p.collect(tick.C)
}()
p.wg.Add(1)
go func() {
defer p.wg.Done()
p.send()
}()
}
// collect runs the profile types found in the configuration whenever the ticker receives
// an item.
func (p *profiler) collect(ticker <-chan time.Time) {
defer close(p.out)
for {
select {
case <-ticker:
now := now()
bat := batch{
host: p.cfg.hostname,
start: now,
// NB: while this is technically wrong in that it does not
// record the actual start and end timestamps for the batch,
// it is how the backend understands the client-side
// configured CPU profile duration: (start-end).
end: now.Add(p.cfg.cpuDuration),
}
for t := range p.cfg.types {
prof, err := p.runProfile(t)
if err != nil {
log.Error("Error getting %s profile: %v; skipping.", t, err)
p.cfg.statsd.Count("datadog.profiler.go.collect_error", 1, append(p.cfg.tags, t.Tag()), 1)
continue
}
bat.addProfile(prof)
}
p.enqueueUpload(bat)
case <-p.exit:
return
}
}
}
// enqueueUpload pushes a batch of profiles onto the queue to be uploaded. If there is no room, it will
// evict the oldest profile to make some. Typically a batch would be one of each enabled profile.
func (p *profiler) enqueueUpload(bat batch) {
for {
select {
case p.out <- bat:
return // 👍
default:
// queue is full; evict oldest
select {
case <-p.out:
p.cfg.statsd.Count("datadog.profiler.go.queue_full", 1, p.cfg.tags, 1)
log.Warn("Evicting one profile batch from the upload queue to make room.")
default:
// this case should be almost impossible to trigger, it would require a
// full p.out to completely drain within nanoseconds or extreme
// scheduling decisions by the runtime.
}
}
}
}
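// Design note (added commentary, not in the original file): enqueueUpload
// prefers evicting the oldest batch over blocking the caller. Profiles are
// best-effort telemetry, so dropping a stale batch is cheaper than stalling
// the collect loop or letting the queue grow without bound.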
// send takes profiles from the output queue and uploads them.
func (p *profiler) send() {
for bat := range p.out {
if err := p.uploadFunc(bat); err != nil {
log.Error("Failed to upload profile: %v", err)
}
}
}
// stop stops the profiler.
func (p *profiler) stop() {
p.stopOnce.Do(func() {
close(p.exit)
})
p.wg.Wait()
}
// StatsdClient implementations can count and time certain event occurrences that happen
// in the profiler.
type StatsdClient interface {
// Count counts how many times an event happened, at the given rate using the given tags.
Count(event string, times int64, tags []string, rate float64) error
// Timing creates a distribution of the values registered as the duration of a certain event.
Timing(event string, duration time.Duration, tags []string, rate float64) error
}
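// noopStatsdClient is an illustrative sketch, not part of the original file:
// it is the smallest possible StatsdClient, useful when no statsd agent is
// running, since every metric is silently discarded.
type noopStatsdClient struct{}

func (noopStatsdClient) Count(event string, times int64, tags []string, rate float64) error {
	return nil
}

func (noopStatsdClient) Timing(event string, duration time.Duration, tags []string, rate float64) error {
	return nil
}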
| ["\"DD_PROFILING_WAIT_PROFILE\""] | [] | ["DD_PROFILING_WAIT_PROFILE"] | [] | ["DD_PROFILING_WAIT_PROFILE"] | go | 1 | 0 | |
tests/tests.go | package tests
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"os"
"time"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"twreporter.org/go-api/constants"
"twreporter.org/go-api/controllers"
"twreporter.org/go-api/models"
"twreporter.org/go-api/storage"
"twreporter.org/go-api/utils"
)
var (
DefaultAccount = "[email protected]"
DefaultService = "default_service"
DefaultToken = "default_token"
Engine *gin.Engine
DB *gorm.DB
MgoDB *mgo.Session
MgoDBName = "gorm"
// collections name
MgoPostCol = "posts"
MgoTopicCol = "topics"
MgoImgCol = "images"
MgoVideoCol = "videos"
MgoTagCol = "tags"
MgoCategoriesCol = "postcategories"
MgoThemeCol = "themes"
// objectID
ImgID1 = bson.NewObjectId()
ImgID2 = bson.NewObjectId()
VideoID = bson.NewObjectId()
PostID1 = bson.NewObjectId()
PostID2 = bson.NewObjectId()
TopicID = bson.NewObjectId()
TagID = bson.NewObjectId()
CatID = bson.NewObjectId()
ThemeID = bson.NewObjectId()
// collection
ImgCol1 models.MongoImage
ImgCol2 models.MongoImage
VideoCol models.MongoVideo
PostCol1 models.Post
PostCol2 models.Post
TagCol models.Tag
CatCol models.Category
TopicCol models.Topic
ThemeCol models.Theme
MockPostSlug1 = "mock-post-slug-1"
MockTopicSlug = "mock-topic-slug"
)
func OpenGormConnection() (db *gorm.DB, err error) {
dbhost := os.Getenv("GORM_DBADDRESS")
if dbhost != "" {
utils.Cfg.DBSettings.Address = dbhost
} else {
utils.Cfg.DBSettings.Address = "127.0.0.1"
}
utils.Cfg.DBSettings.User = "gorm"
utils.Cfg.DBSettings.Password = "gorm"
utils.Cfg.DBSettings.Port = "3306"
utils.Cfg.DBSettings.Name = "gorm"
db, _ = utils.InitDB(10, 5)
if os.Getenv("DEBUG") == "true" {
db.LogMode(true)
}
return
}
func OpenMgoConnection() (session *mgo.Session, err error) {
dbhost := os.Getenv("MGO_DBADDRESS")
if dbhost == "" {
dbhost = "localhost"
}
session, err = mgo.Dial(dbhost)
// set settings
utils.Cfg.MongoDBSettings.DBName = MgoDBName
return
}
func RunMigration() {
RunGormMigration()
RunMgoMigration()
}
func RunGormMigration() {
values := []interface{}{&models.User{}, &models.OAuthAccount{}, &models.ReporterAccount{}, &models.Bookmark{}, &models.Registration{}, &models.Service{}, &models.UsersBookmarks{}, &models.WebPushSubscription{}}
for _, value := range values {
DB.DropTable(value)
}
if err := DB.AutoMigrate(values...).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
}
func RunMgoMigration() {
err := MgoDB.DB(MgoDBName).DropDatabase()
if err != nil {
panic(fmt.Sprint("Can not drop mongo gorm database"))
}
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoPostCol}}, nil)
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoTopicCol}}, nil)
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoImgCol}}, nil)
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoVideoCol}}, nil)
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoTagCol}}, nil)
MgoDB.DB(MgoDBName).Run(bson.D{{"create", MgoCategoriesCol}}, nil)
}
func SetDefaultRecords() {
SetGormDefaultRecords()
SetMgoDefaultRecords()
}
func SetMgoDefaultRecords() {
ImgCol1 = models.MongoImage{
ID: ImgID1,
Description: "mock image desc",
Copyright: "",
Image: models.MongoImageAsset{
Height: 1200,
Filetype: "image/jpg",
Width: 2000,
URL: "https://www.twreporter.org/images/mock-image-1.jpg",
ResizedTargets: models.ResizedTargets{
Mobile: models.ImageAsset{
Height: 600,
Width: 800,
URL: "https://www.twreporter.org/images/mock-image-1-mobile.jpg",
},
Tablet: models.ImageAsset{
Height: 1000,
Width: 1400,
URL: "https://www.twreporter.org/images/mock-image-1-tablet.jpg",
},
Desktop: models.ImageAsset{
Height: 1200,
Width: 2000,
URL: "https://www.twreporter.org/images/mock-image-1-desktop.jpg",
},
Tiny: models.ImageAsset{
Height: 60,
Width: 80,
URL: "https://www.twreporter.org/images/mock-image-1-tiny.jpg",
},
W400: models.ImageAsset{
Height: 300,
Width: 400,
URL: "https://www.twreporter.org/images/mock-image-1-w400.jpg",
},
},
},
}
VideoCol = models.MongoVideo{
ID: VideoID,
Title: "mock video title",
Video: models.MongoVideoAsset{
Filetype: "video/mp4",
Size: 1000,
URL: "https://www.twreporter.org/videos/mock-video.mp4",
},
}
TagCol = models.Tag{
ID: TagID,
Name: "mock tag",
}
CatCol = models.Category{
ID: CatID,
Name: "mock postcategory",
}
ThemeCol = models.Theme{
ID: ThemeID,
Name: "photograph",
TitlePosition: "title-above",
}
PostCol1 = models.Post{
ID: PostID1,
Slug: MockPostSlug1,
Name: "mock post slug 1",
Style: "article",
State: "published",
ThemeOrigin: ThemeID,
PublishedDate: time.Now(),
HeroImageOrigin: ImgID1,
CategoriesOrigin: []bson.ObjectId{CatID},
OgImageOrigin: ImgID1,
IsFeatured: true,
TopicOrigin: TopicID,
RelatedsOrigin: []bson.ObjectId{PostID2},
}
TopicCol = models.Topic{
ID: TopicID,
Slug: MockTopicSlug,
TopicName: "mock topic slug",
Title: "mock title",
State: "published",
RelatedsOrigin: []bson.ObjectId{PostID1, PostID2},
LeadingImageOrigin: ImgID1,
LeadingVideoOrigin: VideoID,
OgImageOrigin: ImgID1,
}
// insert img1 and img2
MgoDB.DB(MgoDBName).C(MgoImgCol).Insert(ImgCol1)
ImgCol2 = ImgCol1
ImgCol2.ID = ImgID2
MgoDB.DB(MgoDBName).C(MgoImgCol).Insert(ImgCol2)
// insert video
MgoDB.DB(MgoDBName).C(MgoVideoCol).Insert(VideoCol)
// insert tag and postcategory
MgoDB.DB(MgoDBName).C(MgoTagCol).Insert(TagCol)
MgoDB.DB(MgoDBName).C(MgoCategoriesCol).Insert(CatCol)
// insert post1 and post2
MgoDB.DB(MgoDBName).C(MgoPostCol).Insert(PostCol1)
// insert theme
MgoDB.DB(MgoDBName).C(MgoThemeCol).Insert(ThemeCol)
PostCol2 = PostCol1
PostCol2.ID = PostID2
PostCol2.Slug = "mock-post-slug-2"
PostCol2.Name = "mock post slug 2"
PostCol2.Style = "review"
PostCol2.PublishedDate = time.Now()
PostCol2.HeroImageOrigin = ImgID2
PostCol2.LeadingImagePortraitOrigin = ImgID1
PostCol2.OgImageOrigin = ImgID2
PostCol2.IsFeatured = false
PostCol2.TagsOrigin = []bson.ObjectId{TagID}
MgoDB.DB(MgoDBName).C(MgoPostCol).Insert(PostCol2)
// insert topic
MgoDB.DB(MgoDBName).C(MgoTopicCol).Insert(TopicCol)
}
func SetGormDefaultRecords() {
// Set an active reporter account
ms := storage.NewGormStorage(DB)
ra := models.ReporterAccount{
Email: DefaultAccount,
ActivateToken: DefaultToken,
ActExpTime: time.Now().Add(time.Duration(15) * time.Minute),
}
_, _ = ms.InsertUserByReporterAccount(ra)
ms.CreateService(models.ServiceJSON{Name: DefaultService})
ms.CreateRegistration(DefaultService, models.RegistrationJSON{Email: DefaultAccount, ActivateToken: DefaultToken})
}
func SetupGinServer() {
// set up data storage
gs := storage.NewGormStorage(DB)
// init controllers
mc := controllers.NewMembershipController(gs)
fc := controllers.Facebook{Storage: gs}
gc := controllers.Google{Storage: gs}
ms := storage.NewMongoStorage(MgoDB)
nc := controllers.NewNewsController(ms)
cf := &controllers.ControllerFactory{
Controllers: make(map[string]controllers.Controller),
}
cf.SetController(constants.MembershipController, mc)
cf.SetController(constants.FacebookController, fc)
cf.SetController(constants.GoogleController, gc)
cf.SetController(constants.NewsController, nc)
Engine = gin.Default()
routerGroup := Engine.Group("/v1")
{
menuitems := new(controllers.MenuItemsController)
routerGroup.GET("/ping", menuitems.Retrieve)
}
routerGroup = cf.SetRoute(routerGroup)
}
func RequestWithBody(method, path, body string) (req *http.Request) {
req, _ = http.NewRequest(method, path, bytes.NewBufferString(body))
return
}
func GenerateJWT(user models.User) (jwt string) {
jwt, _ = utils.RetrieveToken(user.ID, user.Email.String)
return
}
func GetUser(email string) (user models.User) {
as := storage.NewGormStorage(DB)
user, _ = as.GetUserByEmail(email)
return
}
func ServeHTTP(method, path, body, contentType, authorization string) (resp *httptest.ResponseRecorder) {
var req *http.Request
req = RequestWithBody(method, path, body)
if contentType != "" {
req.Header.Add("Content-Type", contentType)
}
if authorization != "" {
req.Header.Add("Authorization", authorization)
}
resp = httptest.NewRecorder()
Engine.ServeHTTP(resp, req)
return
}
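// Illustrative sketch, not part of the original file: a typical test in this
// repository wires the helpers above together roughly as follows. The test
// names and assertion are assumptions for the example; the /v1/ping route is
// the one registered in SetupGinServer.
//
//	func TestMain(m *testing.M) {
//		DB, _ = OpenGormConnection()
//		MgoDB, _ = OpenMgoConnection()
//		RunMigration()
//		SetDefaultRecords()
//		SetupGinServer()
//		os.Exit(m.Run())
//	}
//
//	func TestPing(t *testing.T) {
//		resp := ServeHTTP("GET", "/v1/ping", "", "", "")
//		if resp.Code != http.StatusOK {
//			t.Errorf("unexpected status %d", resp.Code)
//		}
//	}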
| ["\"GORM_DBADDRESS\"", "\"DEBUG\"", "\"MGO_DBADDRESS\""] | [] | ["GORM_DBADDRESS", "MGO_DBADDRESS", "DEBUG"] | [] | ["GORM_DBADDRESS", "MGO_DBADDRESS", "DEBUG"] | go | 3 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/148/385/CWE90_LDAP_Injection__Environment_53a.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE90_LDAP_Injection__Environment_53a.java
Label Definition File: CWE90_LDAP_Injection.label.xml
Template File: sources-sink-53a.tmpl.java
*/
/*
* @description
* CWE: 90 LDAP Injection
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks:
* BadSink : data concatenated into LDAP search, which could result in LDAP Injection
* Flow Variant: 53 Data flow: data passed as an argument from one method through two others to a fourth; all four functions are in different classes in the same package
*
* */
public class CWE90_LDAP_Injection__Environment_53a extends AbstractTestCase
{
public void bad() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
(new CWE90_LDAP_Injection__Environment_53b()).badSink(data );
}
public void good() throws Throwable
{
goodG2B();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
String data;
/* FIX: Use a hardcoded string */
data = "foo";
(new CWE90_LDAP_Injection__Environment_53b()).goodG2BSink(data );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| ["\"ADD\""] | [] | ["ADD"] | [] | ["ADD"] | java | 1 | 0 | |
PLSWebsite/wsgi.py | """
WSGI config for PLSWebsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PLSWebsite.settings')
application = get_wsgi_application()
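# Illustrative note, not part of the original file: a WSGI server imports the
# module-level ``application`` defined above. For example, with gunicorn (the
# command is an assumption for illustration):
#
#   gunicorn PLSWebsite.wsgi:application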
| [] | [] | [] | [] | [] | python | 0 | 0 | |
vendor/code.cloudfoundry.org/cli/command/v2/create_space_command.go | package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
"code.cloudfoundry.org/cli/command/flag"
)
type CreateSpaceCommand struct {
RequiredArgs flag.Space `positional-args:"yes"`
Organization string `short:"o" description:"Organization"`
Quota string `short:"q" description:"Quota to assign to the newly created space"`
usage interface{} `usage:"CF_NAME create-space SPACE [-o ORG] [-q SPACE_QUOTA]"`
relatedCommands interface{} `related_commands:"target, space-quotas, spaces"`
}
func (_ CreateSpaceCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
func (_ CreateSpaceCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
| ["\"CF_TRACE\""] | [] | ["CF_TRACE"] | [] | ["CF_TRACE"] | go | 1 | 0 | |
demo/covid_video_classification.py | import streamlit as st
import tempfile
import os
from os.path import join
import random
import torch
import numpy as np
import cv2
from model import all_models, get_model
from vector_cv_tools.utils import VideoReader
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
DOCUMENTS_ROOT = os.getenv("CV_DEMO_DOC_ROOT", default="./documents")
MEDIA_FILE_ROOT = join(DOCUMENTS_ROOT, "covid_classification")
CKPT_ROOT = join(MEDIA_FILE_ROOT, "checkpoints")
IMG_ROOT = join(MEDIA_FILE_ROOT, "imgs")
MARKDOWN_ROOT = join(MEDIA_FILE_ROOT, "markdowns")
SAMPLE_ROOT = join(MEDIA_FILE_ROOT, "sample_videos")
TEST_ROOT = join(MEDIA_FILE_ROOT, "test_videos")
sample_videos = [
"norm1_crop.mp4",
"norm2_crop.mp4",
"covid1_crop.mp4",
"covid2_crop.mp4",
"pnue1_crop.mp4",
"pnue2_crop.mp4",
]
test_videos = [
"normal.mp4",
"covid.mp4",
"pneumonia.mp4",
]
sample_video_bytes = [open(join(SAMPLE_ROOT, vid), 'rb').read() for vid in sample_videos]
test_video_bytes = [open(join(TEST_ROOT, vid), 'rb').read() for vid in test_videos]
classes = [
"Normal/Other",
"COVID",
"Pneumonia",
]
loaded_models = {
"MC3": get_model("MC3_18")(),
"R3D": get_model("R3D18")(),
}
for name, model in loaded_models.items():
checkpoint = join(CKPT_ROOT, name + ".ckpt")
state = torch.load(checkpoint)["model"]
model.load_state_dict(state)
model = model.to(device)
model.eval()
model_card_mc3 = open(join(MARKDOWN_ROOT, "MC3.md")).read()
model_card_r3d = open(join(MARKDOWN_ROOT, "R3D.md")).read()
datacard = open(join(MARKDOWN_ROOT, "datacard.md")).read()
datasource = open(join(MARKDOWN_ROOT, "datasource.md")).read()
desc = open(join(MARKDOWN_ROOT, "covid_intro.md")).read()
def get_video_tensors(path):
vid = list(VideoReader(path).to_iter())
dim = (128, 128)
vid = [ cv2.resize(vid[i], dim, interpolation = cv2.INTER_AREA) \
for i in range(0, len(vid), 2)]
vid = np.array(vid)
vid = torch.from_numpy(vid)
vid = vid.float() / 255.0
vid = vid.permute(3, 0, 1, 2)
vid = vid.unsqueeze(0)
return vid
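# Illustrative sketch, not part of the original file: get_video_tensors yields
# a float tensor shaped (1, C, T, H, W) -- a batch of one clip, channels first,
# every 2nd frame, resized to 128x128 and scaled to [0, 1] -- which is the
# layout the 3D-convolution models above expect. The frame count below is an
# assumption for the example.
#
#   dummy = torch.rand(1, 3, 40, 128, 128)          # same layout as get_video_tensors output
#   with torch.no_grad():
#       logits = loaded_models["R3D"](dummy.to(device))
#   probs = torch.nn.Softmax(dim=1)(logits)          # one probability per class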
test_video_tensors = [get_video_tensors(join(TEST_ROOT, p)) for p in test_videos]
# rotate the display order; randrange keeps the offset within the number of test videos
rand_shuffle = random.randrange(len(test_videos))
inference_text = [("", []), ("", []), ("", [])]
ground_truth_res = [False, False, False]
def reset():
global inference_text
global ground_truth_res
inference_text = [("", []), ("", []), ("", [])]
ground_truth_res = [False, False, False]
def video_classification_page(state):
st.title("Classification of COVID-19 Based on Lung Ultra-sound")
# INTRO
col1, col2 = st.beta_columns(2)
col1.markdown(desc)
col2.image(join(IMG_ROOT, "vector_logo.jpg"))
# Data
st.markdown(datacard)
col1, col2 = st.beta_columns([1, 1])
col1.markdown(datasource)
col2.markdown("## Conceptual flow of the data collection and processing")
col2.image(join(IMG_ROOT, "conceptual_flow.png"))
# Data samples
example_expander = st.beta_expander("Data Samples")
cols = [2, 4, 4]
for i in range(0, len(sample_video_bytes), 2):
col0, col1, col2 = example_expander.beta_columns(cols)
col0.markdown("**{}**".format(classes[i // 2]))
col1.video(sample_video_bytes[i])
col2.video(sample_video_bytes[i + 1])
# Model
st.markdown("# Let's start with selecting a model!")
models = ("None", "Resnet 3D Model (R3D)",
"Mixed Convolutional Network (MC3)")
selected_model = st.selectbox("", models)
if len(selected_model) == 0 or selected_model == "None":
return
col1, col2 = st.beta_columns([2, 1])
if "MC3" in selected_model:
model_card = model_card_mc3
img_path = join(IMG_ROOT, "mc3.png")
model_key = "MC3"
else:
model_card = model_card_r3d
img_path = join(IMG_ROOT, "r3d.png")
model_key = "R3D"
col1.markdown(model_card)
col2.image(img_path, width=200, caption="Model Architecture")
# Live Demo
demo_expander = st.markdown("# Test the model on real (unseen) videos")
model_for_inference = loaded_models[model_key]
demo_expander = st.beta_expander("Test Samples")
if demo_expander.button("Reset", key="reset"):
reset()
cols = [4, 2, 2, 2]
for i in range(len(test_video_bytes)):
i = (i + rand_shuffle) % len(test_video_bytes)
col0, col1, col2, col3 = demo_expander.beta_columns(cols)
col0.video(test_video_bytes[i])
col1.markdown("__Take a guess below__")
user_pred = col1.selectbox("", ["I Don't Know"] + classes,
key="select{}".format(i))
model_pred = None
col2.markdown("---")
if col2.button("Test Video Against Model", key="pred{}".format(i)):
pred = model_for_inference(test_video_tensors[i].to(device))
pred_idx = torch.argmax(pred).item()
beta = 0.5
pred = pred * beta
pred = torch.nn.Softmax(dim=0)(pred.flatten()).tolist()
model_pred = classes[pred_idx]
prediction_text = ["{:<15}: {:.2f}%".format(cls, prob * 100) \
for cls, prob in zip(classes, pred)]
inference_text[i] = model_pred, prediction_text
model_pred, prediction_text = inference_text[i]
for t in prediction_text:
col2.write(t)
if model_pred:
col2.markdown("\n*__Prediction: {}__*\n".format(model_pred))
col3.markdown("---")
if col3.button("Show Ground Truth", key="gt{}".format(i)):
ground_truth_res[i] = True
if ground_truth_res[i]:
ground_truth = classes[i]
col3.write("Ground Truth:")
col3.write("__{}__".format(ground_truth))
col3.markdown("---")
if model_pred == ground_truth:
col3.write("Model is correct!!")
else:
col3.write("Model is wrong...")
col3.markdown("---")
if user_pred == ground_truth:
col3.write("You are correct!!")
else:
col3.write("You are wrong...")
| [] | [] | [
"CV_DEMO_DOC_ROOT"
] | [] | ["CV_DEMO_DOC_ROOT"] | python | 1 | 0 |