// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Copyright (c) 2011-2012 clevelandcoin Developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "checkpoints.h"
#include "db.h"
#include "net.h"
#include "init.h"
#include "ui_interface.h"
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
using namespace std;
using namespace boost;
//
// Global state
//
CCriticalSection cs_setpwalletRegistered;
set<CWallet*> setpwalletRegistered;
CCriticalSection cs_main;
CTxMemPool mempool;
unsigned int nTransactionsUpdated = 0;
map<uint256, CBlockIndex*> mapBlockIndex;
uint256 hashGenesisBlock("0x620995fa30c65100adb47dc3cb4a0badb1c522a9d8e62bbb630e8c3e6b9c1717");
static CBigNum bnProofOfWorkLimit(~uint256(0) >> 20); // clevelandcoin: starting difficulty is 1 / 2^12
CBlockIndex* pindexGenesisBlock = NULL;
int nBestHeight = -1;
CBigNum bnBestChainWork = 0;
CBigNum bnBestInvalidWork = 0;
uint256 hashBestChain = 0;
CBlockIndex* pindexBest = NULL;
int64 nTimeBestReceived = 0;
CMedianFilter<int> cPeerBlockCounts(5, 0); // Number of blocks that other nodes claim to have
map<uint256, CBlock*> mapOrphanBlocks;
multimap<uint256, CBlock*> mapOrphanBlocksByPrev;
map<uint256, CDataStream*> mapOrphanTransactions;
map<uint256, map<uint256, CDataStream*> > mapOrphanTransactionsByPrev;
// Constant stuff for coinbase transactions we create:
CScript COINBASE_FLAGS;
const string strMessageMagic = "clevelandcoin Signed Message:\n";
double dHashesPerSec;
int64 nHPSTimerStart;
// Settings
int64 nTransactionFee = 0;
int64 nMinimumInputValue = CENT / 100;
//////////////////////////////////////////////////////////////////////////////
//
// dispatching functions
//
// These functions dispatch to one or all registered wallets
void RegisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.insert(pwalletIn);
}
}
void UnregisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.erase(pwalletIn);
}
}
// check whether the passed transaction is from us
bool static IsFromMe(CTransaction& tx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->IsFromMe(tx))
return true;
return false;
}
// get the wallet transaction with the given hash (if it exists)
bool static GetTransaction(const uint256& hashTx, CWalletTx& wtx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->GetTransaction(hashTx,wtx))
return true;
return false;
}
// erases transaction with the given hash from all wallets
void static EraseFromWallets(uint256 hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->EraseFromWallet(hash);
}
// make sure all wallets know about the given transaction, in the given block
void SyncWithWallets(const CTransaction& tx, const CBlock* pblock, bool fUpdate)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->AddToWalletIfInvolvingMe(tx, pblock, fUpdate);
}
// notify wallets about a new best chain
void static SetBestChain(const CBlockLocator& loc)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->SetBestChain(loc);
}
// notify wallets about an updated transaction
void static UpdatedTransaction(const uint256& hashTx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->UpdatedTransaction(hashTx);
}
// dump all wallets
void static PrintWallets(const CBlock& block)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->PrintWallet(block);
}
// notify wallets about an incoming inventory (for request counts)
void static Inventory(const uint256& hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->Inventory(hash);
}
// ask wallets to resend their transactions
void static ResendWalletTransactions()
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->ResendWalletTransactions();
}
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CDataStream& vMsg)
{
CTransaction tx;
CDataStream(vMsg) >> tx;
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
CDataStream* pvMsg = new CDataStream(vMsg);
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big, is
// at most 50 megabytes of orphans:
if (pvMsg->size() > 5000)
{
printf("ignoring large orphan tx (size: %u, hash: %s)\n", pvMsg->size(), hash.ToString().substr(0,10).c_str());
delete pvMsg;
return false;
}
mapOrphanTransactions[hash] = pvMsg;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(make_pair(hash, pvMsg));
printf("stored orphan tx %s (mapsz %u)\n", hash.ToString().substr(0,10).c_str(),
mapOrphanTransactions.size());
return true;
}
void static EraseOrphanTx(uint256 hash)
{
if (!mapOrphanTransactions.count(hash))
return;
const CDataStream* pvMsg = mapOrphanTransactions[hash];
CTransaction tx;
CDataStream(*pvMsg) >> tx;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
mapOrphanTransactionsByPrev[txin.prevout.hash].erase(hash);
if (mapOrphanTransactionsByPrev[txin.prevout.hash].empty())
mapOrphanTransactionsByPrev.erase(txin.prevout.hash);
}
delete pvMsg;
mapOrphanTransactions.erase(hash);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
map<uint256, CDataStream*>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
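// lower_bound on a random hash selects an approximately uniform random entry,
// wrapping back to begin() if the random hash falls past the last key.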
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
//////////////////////////////////////////////////////////////////////////////
//
// CTransaction and CTxIndex
//
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout, CTxIndex& txindexRet)
{
SetNull();
if (!txdb.ReadTxIndex(prevout.hash, txindexRet))
return false;
if (!ReadFromDisk(txindexRet.pos))
return false;
if (prevout.n >= vout.size())
{
SetNull();
return false;
}
return true;
}
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout)
{
CTxIndex txindex;
return ReadFromDisk(txdb, prevout, txindex);
}
bool CTransaction::ReadFromDisk(COutPoint prevout)
{
CTxDB txdb("r");
CTxIndex txindex;
return ReadFromDisk(txdb, prevout, txindex);
}
bool CTransaction::IsStandard() const
{
if (nVersion > CTransaction::CURRENT_VERSION)
return false;
BOOST_FOREACH(const CTxIn& txin, vin)
{
// Biggest 'standard' txin is a 3-signature 3-of-3 CHECKMULTISIG
// pay-to-script-hash, which is 3 ~80-byte signatures, 3
// ~65-byte public keys, plus a few script ops.
if (txin.scriptSig.size() > 500)
return false;
if (!txin.scriptSig.IsPushOnly())
return false;
}
BOOST_FOREACH(const CTxOut& txout, vout)
if (!::IsStandard(txout.scriptPubKey))
return false;
return true;
}
//
// Check transaction inputs, and make sure any
// pay-to-script-hash transactions are evaluating IsStandard scripts
//
// Why bother? To avoid denial-of-service attacks; an attacker
// can submit a standard HASH... OP_EQUAL transaction,
// which will get accepted into blocks. The redemption
// script can be anything; an attacker could use a very
// expensive-to-check-upon-redemption script like:
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
//
bool CTransaction::AreInputsStandard(const MapPrevTx& mapInputs) const
{
if (IsCoinBase())
return true; // Coinbases don't use vin normally
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
vector<vector<unsigned char> > vSolutions;
txnouttype whichType;
// get the scriptPubKey corresponding to this input:
const CScript& prevScript = prev.scriptPubKey;
if (!Solver(prevScript, whichType, vSolutions))
return false;
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
if (nArgsExpected < 0)
return false;
// Transactions with extra stuff in their scriptSigs are
// non-standard. Note that this EvalScript() call will
// be quick, because if there are any operations
// beside "push data" in the scriptSig the
// IsStandard() call returns false
vector<vector<unsigned char> > stack;
if (!EvalScript(stack, vin[i].scriptSig, *this, i, 0))
return false;
if (whichType == TX_SCRIPTHASH)
{
if (stack.empty())
return false;
CScript subscript(stack.back().begin(), stack.back().end());
vector<vector<unsigned char> > vSolutions2;
txnouttype whichType2;
if (!Solver(subscript, whichType2, vSolutions2))
return false;
if (whichType2 == TX_SCRIPTHASH)
return false;
int tmpExpected;
tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
if (tmpExpected < 0)
return false;
nArgsExpected += tmpExpected;
}
if (stack.size() != (unsigned int)nArgsExpected)
return false;
}
return true;
}
unsigned int CTransaction::GetLegacySigOpCount() const
{
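// Legacy counting: GetSigOpCount(false) charges a flat 20 sigops for each
// CHECKMULTISIG rather than the actual number of keys it checks.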
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTxIn& txin, vin)
{
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
BOOST_FOREACH(const CTxOut& txout, vout)
{
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
{
if (fClient)
{
if (hashBlock == 0)
return 0;
}
else
{
CBlock blockTmp;
if (pblock == NULL)
{
// Load the block this tx is in
CTxIndex txindex;
if (!CTxDB("r").ReadTxIndex(GetHash(), txindex))
return 0;
if (!blockTmp.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos))
return 0;
pblock = &blockTmp;
}
// Update the tx's hashBlock
hashBlock = pblock->GetHash();
// Locate the transaction
for (nIndex = 0; nIndex < (int)pblock->vtx.size(); nIndex++)
if (pblock->vtx[nIndex] == *(CTransaction*)this)
break;
if (nIndex == (int)pblock->vtx.size())
{
vMerkleBranch.clear();
nIndex = -1;
printf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
return 0;
}
// Fill in merkle branch
vMerkleBranch = pblock->GetMerkleBranch(nIndex);
}
// Is the tx in a block that's in the main chain?
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
return pindexBest->nHeight - pindex->nHeight + 1;
}
bool CTransaction::CheckTransaction() const
{
// Basic checks that don't depend on any context
if (vin.empty())
return DoS(10, error("CTransaction::CheckTransaction() : vin empty"));
if (vout.empty())
return DoS(10, error("CTransaction::CheckTransaction() : vout empty"));
// Size limits
if (::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return DoS(100, error("CTransaction::CheckTransaction() : size limits failed"));
// Check for negative or overflow output values
int64 nValueOut = 0;
BOOST_FOREACH(const CTxOut& txout, vout)
{
if (txout.nValue < 0)
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue negative"));
if (txout.nValue > MAX_MONEY)
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue too high"));
nValueOut += txout.nValue;
if (!MoneyRange(nValueOut))
return DoS(100, error("CTransaction::CheckTransaction() : txout total out of range"));
}
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, vin)
{
if (vInOutPoints.count(txin.prevout))
return false;
vInOutPoints.insert(txin.prevout);
}
if (IsCoinBase())
{
if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100)
return DoS(100, error("CTransaction::CheckTransaction() : coinbase script size"));
}
else
{
BOOST_FOREACH(const CTxIn& txin, vin)
if (txin.prevout.IsNull())
return DoS(10, error("CTransaction::CheckTransaction() : prevout is null"));
}
return true;
}
bool CTxMemPool::accept(CTxDB& txdb, CTransaction &tx, bool fCheckInputs,
bool* pfMissingInputs)
{
if (pfMissingInputs)
*pfMissingInputs = false;
if (!tx.CheckTransaction())
return error("CTxMemPool::accept() : CheckTransaction failed");
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return tx.DoS(100, error("CTxMemPool::accept() : coinbase as individual tx"));
// To help v0.1.5 clients who would see it as a negative number
if ((int64)tx.nLockTime > std::numeric_limits<int>::max())
return error("CTxMemPool::accept() : not accepting nLockTime beyond 2038 yet");
// Rather not work on nonstandard transactions (unless -testnet)
if (!fTestNet && !tx.IsStandard())
return error("CTxMemPool::accept() : nonstandard transaction type");
// Do we already have it?
uint256 hash = tx.GetHash();
{
LOCK(cs);
if (mapTx.count(hash))
return false;
}
if (fCheckInputs)
if (txdb.ContainsTx(hash))
return false;
// Check for conflicts with in-memory transactions
CTransaction* ptxOld = NULL;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (mapNextTx.count(outpoint))
{
// Disable replacement feature for now
return false;
// Allow replacing with a newer version of the same transaction
if (i != 0)
return false;
ptxOld = mapNextTx[outpoint].ptx;
if (ptxOld->IsFinal())
return false;
if (!tx.IsNewerThan(*ptxOld))
return false;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (!mapNextTx.count(outpoint) || mapNextTx[outpoint].ptx != ptxOld)
return false;
}
break;
}
}
if (fCheckInputs)
{
MapPrevTx mapInputs;
map<uint256, CTxIndex> mapUnused;
bool fInvalid = false;
if (!tx.FetchInputs(txdb, mapUnused, false, false, mapInputs, fInvalid))
{
if (fInvalid)
return error("CTxMemPool::accept() : FetchInputs found invalid tx %s", hash.ToString().substr(0,10).c_str());
if (pfMissingInputs)
*pfMissingInputs = true;
return false;
}
// Check for non-standard pay-to-script-hash in inputs
if (!tx.AreInputsStandard(mapInputs) && !fTestNet)
return error("CTxMemPool::accept() : nonstandard transaction input");
// Note: if you modify this code to accept non-standard transactions, then
// you should add code here to check that the transaction does a
// reasonable number of ECDSA signature verifications.
int64 nFees = tx.GetValueIn(mapInputs)-tx.GetValueOut();
unsigned int nSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
// Don't accept it if it can't get into a block
int64 txMinFee = tx.GetMinFee(1000, true, GMF_RELAY);
if (nFees < txMinFee)
return error("CTxMemPool::accept() : not enough fees %s, %"PRI64d" < %"PRI64d,
hash.ToString().c_str(),
nFees, txMinFee);
// Continuously rate-limit free transactions
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
// be annoying or make others' transactions take longer to confirm.
if (nFees < MIN_RELAY_TX_FEE)
{
static CCriticalSection cs;
static double dFreeCount;
static int64 nLastTime;
int64 nNow = GetTime();
{
LOCK(cs);
// Use an exponentially decaying ~10-minute window:
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
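// With the default of 15, that is 15 * 10 * 1000 = 150,000 bytes
// of free transactions allowed in the decaying window.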
if (dFreeCount > GetArg("-limitfreerelay", 15)*10*1000 && !IsFromMe(tx))
return error("CTxMemPool::accept() : free transaction rejected by rate limiter");
if (fDebug)
printf("Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
}
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!tx.ConnectInputs(mapInputs, mapUnused, CDiskTxPos(1,1,1), pindexBest, false, false))
{
return error("CTxMemPool::accept() : ConnectInputs failed %s", hash.ToString().substr(0,10).c_str());
}
}
// Store transaction in memory
{
LOCK(cs);
if (ptxOld)
{
printf("CTxMemPool::accept() : replacing tx %s with new version\n", ptxOld->GetHash().ToString().c_str());
remove(*ptxOld);
}
addUnchecked(hash, tx);
}
///// are we sure this is ok when loading transactions or restoring block txes
// If updated, erase old tx from wallet
if (ptxOld)
EraseFromWallets(ptxOld->GetHash());
printf("CTxMemPool::accept() : accepted %s (poolsz %u)\n",
hash.ToString().c_str(),
mapTx.size());
return true;
}
bool CTransaction::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs, bool* pfMissingInputs)
{
return mempool.accept(txdb, *this, fCheckInputs, pfMissingInputs);
}
bool CTxMemPool::addUnchecked(const uint256& hash, CTransaction &tx)
{
// Add to memory pool without checking anything. Don't call this directly,
// call CTxMemPool::accept to properly check the transaction first.
{
mapTx[hash] = tx;
for (unsigned int i = 0; i < tx.vin.size(); i++)
mapNextTx[tx.vin[i].prevout] = CInPoint(&mapTx[hash], i);
nTransactionsUpdated++;
}
return true;
}
bool CTxMemPool::remove(CTransaction &tx)
{
// Remove transaction from memory pool
{
LOCK(cs);
uint256 hash = tx.GetHash();
if (mapTx.count(hash))
{
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapNextTx.erase(txin.prevout);
mapTx.erase(hash);
nTransactionsUpdated++;
}
}
return true;
}
void CTxMemPool::queryHashes(std::vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (map<uint256, CTransaction>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back((*mi).first);
}
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
{
if (hashBlock == 0 || nIndex == -1)
return 0;
// Find the block it claims to be in
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
// Make sure the merkle branch connects to this block
if (!fMerkleVerified)
{
if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
return 0;
fMerkleVerified = true;
}
pindexRet = pindex;
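// Depth equals confirmations: 1 for the block containing the tx, plus one per block built on top.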
return pindexBest->nHeight - pindex->nHeight + 1;
}
int CMerkleTx::GetBlocksToMaturity() const
{
if (!IsCoinBase())
return 0;
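// The extra 20 blocks on top of COINBASE_MATURITY appear to be a safety
// margin before a coinbase is treated as spendable.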
return max(0, (COINBASE_MATURITY+20) - GetDepthInMainChain());
}
bool CMerkleTx::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs)
{
if (fClient)
{
if (!IsInMainChain() && !ClientConnectInputs())
return false;
return CTransaction::AcceptToMemoryPool(txdb, false);
}
else
{
return CTransaction::AcceptToMemoryPool(txdb, fCheckInputs);
}
}
bool CMerkleTx::AcceptToMemoryPool()
{
CTxDB txdb("r");
return AcceptToMemoryPool(txdb);
}
bool CWalletTx::AcceptWalletTransaction(CTxDB& txdb, bool fCheckInputs)
{
{
LOCK(mempool.cs);
// Add previous supporting transactions first
BOOST_FOREACH(CMerkleTx& tx, vtxPrev)
{
if (!tx.IsCoinBase())
{
uint256 hash = tx.GetHash();
if (!mempool.exists(hash) && !txdb.ContainsTx(hash))
tx.AcceptToMemoryPool(txdb, fCheckInputs);
}
}
return AcceptToMemoryPool(txdb, fCheckInputs);
}
return false;
}
bool CWalletTx::AcceptWalletTransaction()
{
CTxDB txdb("r");
return AcceptWalletTransaction(txdb);
}
int CTxIndex::GetDepthInMainChain() const
{
// Read block header
CBlock block;
if (!block.ReadFromDisk(pos.nFile, pos.nBlockPos, false))
return 0;
// Find the block in the index
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(block.GetHash());
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
return 1 + nBestHeight - pindex->nHeight;
}
// Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock
bool GetTransaction(const uint256 &hash, CTransaction &tx, uint256 &hashBlock)
{
{
LOCK(cs_main);
{
LOCK(mempool.cs);
if (mempool.exists(hash))
{
tx = mempool.lookup(hash);
return true;
}
}
CTxDB txdb("r");
CTxIndex txindex;
if (tx.ReadFromDisk(txdb, COutPoint(hash, 0), txindex))
{
CBlock block;
if (block.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos, false))
hashBlock = block.GetHash();
return true;
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
bool CBlock::ReadFromDisk(const CBlockIndex* pindex, bool fReadTransactions)
{
if (!fReadTransactions)
{
*this = pindex->GetBlockHeader();
return true;
}
if (!ReadFromDisk(pindex->nFile, pindex->nBlockPos, fReadTransactions))
return false;
if (GetHash() != pindex->GetBlockHash())
return error("CBlock::ReadFromDisk() : GetHash() doesn't match index");
return true;
}
uint256 static GetOrphanRoot(const CBlock* pblock)
{
// Work back to the first block in the orphan chain
while (mapOrphanBlocks.count(pblock->hashPrevBlock))
pblock = mapOrphanBlocks[pblock->hashPrevBlock];
return pblock->GetHash();
}
int64 static GetBlockValue(int nHeight, int64 nFees)
{
int64 nSubsidy = 50 * COIN;
// Subsidy is cut in half every 840000 blocks, which will occur approximately every 4 years
nSubsidy >>= (nHeight / 840000); // clevelandcoin: 840k blocks in ~4 years
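// Summing the geometric series, total issuance converges to 840000 * 50 * 2 = 84 million coins.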
return nSubsidy + nFees;
}
static const int64 nTargetTimespan = 3.5 * 24 * 60 * 60; // clevelandcoin: 3.5 days
static const int64 nTargetSpacing = 2.5 * 60; // clevelandcoin: 2.5 minutes
static const int64 nInterval = nTargetTimespan / nTargetSpacing;
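// 3.5 days / 2.5 minutes: 302400 s / 150 s = 2016 blocks per retarget interval.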
//
// Minimum amount of work that could possibly be required nTime seconds
// after the minimum work required was nBase
//
unsigned int ComputeMinWork(unsigned int nBase, int64 nTime)
{
// Testnet has min-difficulty blocks
// after nTargetSpacing*2 time between blocks:
if (fTestNet && nTime > nTargetSpacing*2)
return bnProofOfWorkLimit.GetCompact();
CBigNum bnResult;
bnResult.SetCompact(nBase);
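// Each nTargetTimespan*4 of elapsed time lets the target grow by at most 4x
// (mirroring the 400% per-retarget adjustment cap below).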
while (nTime > 0 && bnResult < bnProofOfWorkLimit)
{
// Maximum 400% adjustment...
bnResult *= 4;
// ... in best-case exactly 4-times-normal target time
nTime -= nTargetTimespan*4;
}
if (bnResult > bnProofOfWorkLimit)
bnResult = bnProofOfWorkLimit;
return bnResult.GetCompact();
}
unsigned int static GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlock *pblock)
{
unsigned int nProofOfWorkLimit = bnProofOfWorkLimit.GetCompact();
// Genesis block
if (pindexLast == NULL)
return nProofOfWorkLimit;
// Only change once per interval
if ((pindexLast->nHeight+1) % nInterval != 0)
{
// Special difficulty rule for testnet:
if (fTestNet)
{
// If the new block's timestamp is more than twice the target spacing
// (2 * 2.5 minutes) after the previous block,
// then allow mining of a min-difficulty block.
if (pblock->nTime > pindexLast->nTime + nTargetSpacing*2)
return nProofOfWorkLimit;
else
{
// Return the last non-special-min-difficulty-rules-block
const CBlockIndex* pindex = pindexLast;
while (pindex->pprev && pindex->nHeight % nInterval != 0 && pindex->nBits == nProofOfWorkLimit)
pindex = pindex->pprev;
return pindex->nBits;
}
}
return pindexLast->nBits;
}
// clevelandcoin: This fixes an issue where a 51% attack can change difficulty at will.
// Go back the full period unless it's the first retarget after genesis. Code courtesy of Art Forz
int blockstogoback = nInterval-1;
if ((pindexLast->nHeight+1) != nInterval)
blockstogoback = nInterval;
// Go back by what we want to be 3.5 days' worth of blocks
const CBlockIndex* pindexFirst = pindexLast;
for (int i = 0; pindexFirst && i < blockstogoback; i++)
pindexFirst = pindexFirst->pprev;
assert(pindexFirst);
// Limit adjustment step
int64 nActualTimespan = pindexLast->GetBlockTime() - pindexFirst->GetBlockTime();
printf(" nActualTimespan = %"PRI64d" before bounds\n", nActualTimespan);
if (nActualTimespan < nTargetTimespan/4)
nActualTimespan = nTargetTimespan/4;
if (nActualTimespan > nTargetTimespan*4)
nActualTimespan = nTargetTimespan*4;
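// The clamp above bounds each retarget to at most a 4x change in either direction.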
// Retarget
CBigNum bnNew;
bnNew.SetCompact(pindexLast->nBits);
bnNew *= nActualTimespan;
bnNew /= nTargetTimespan;
if (bnNew > bnProofOfWorkLimit)
bnNew = bnProofOfWorkLimit;
/// debug print
printf("GetNextWorkRequired RETARGET\n");
printf("nTargetTimespan = %"PRI64d" nActualTimespan = %"PRI64d"\n", nTargetTimespan, nActualTimespan);
printf("Before: %08x %s\n", pindexLast->nBits, CBigNum().SetCompact(pindexLast->nBits).getuint256().ToString().c_str());
printf("After: %08x %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString().c_str());
return bnNew.GetCompact();
}
bool CheckProofOfWork(uint256 hash, unsigned int nBits)
{
CBigNum bnTarget;
bnTarget.SetCompact(nBits);
// Check range
if (bnTarget <= 0 || bnTarget > bnProofOfWorkLimit)
return error("CheckProofOfWork() : nBits below minimum work");
// Check proof of work matches claimed amount
if (hash > bnTarget.getuint256())
return error("CheckProofOfWork() : hash doesn't match nBits");
return true;
}
// Return an estimate of the number of blocks that other nodes claim to have
int GetNumBlocksOfPeers()
{
return std::max(cPeerBlockCounts.median(), Checkpoints::GetTotalBlocksEstimate());
}
bool IsInitialBlockDownload()
{
if (pindexBest == NULL || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
return true;
static int64 nLastUpdate;
static CBlockIndex* pindexLastBest;
if (pindexBest != pindexLastBest)
{
pindexLastBest = pindexBest;
nLastUpdate = GetTime();
}
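// Heuristic: still syncing if the tip advanced within the last 10 seconds
// and the best block is more than a day old.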
return (GetTime() - nLastUpdate < 10 &&
pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (pindexNew->bnChainWork > bnBestInvalidWork)
{
bnBestInvalidWork = pindexNew->bnChainWork;
CTxDB().WriteBestInvalidWork(bnBestInvalidWork);
uiInterface.NotifyBlocksChanged();
}
printf("InvalidChainFound: invalid block=%s height=%d work=%s date=%s\n",
pindexNew->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->nHeight,
pindexNew->bnChainWork.ToString().c_str(), DateTimeStrFormat("%x %H:%M:%S",
pindexNew->GetBlockTime()).c_str());
printf("InvalidChainFound: current best=%s height=%d work=%s date=%s\n",
hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, bnBestChainWork.ToString().c_str(),
DateTimeStrFormat("%x %H:%M:%S", pindexBest->GetBlockTime()).c_str());
if (pindexBest && bnBestInvalidWork > bnBestChainWork + pindexBest->GetBlockWork() * 6)
printf("InvalidChainFound: WARNING: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.\n");
}
void CBlock::UpdateTime(const CBlockIndex* pindexPrev)
{
nTime = max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
// Updating time can change work required on testnet:
if (fTestNet)
nBits = GetNextWorkRequired(pindexPrev, this);
}
bool CTransaction::DisconnectInputs(CTxDB& txdb)
{
// Relinquish previous transactions' spent pointers
if (!IsCoinBase())
{
BOOST_FOREACH(const CTxIn& txin, vin)
{
COutPoint prevout = txin.prevout;
// Get prev txindex from disk
CTxIndex txindex;
if (!txdb.ReadTxIndex(prevout.hash, txindex))
return error("DisconnectInputs() : ReadTxIndex failed");
if (prevout.n >= txindex.vSpent.size())
return error("DisconnectInputs() : prevout.n out of range");
// Mark outpoint as not spent
txindex.vSpent[prevout.n].SetNull();
// Write back
if (!txdb.UpdateTxIndex(prevout.hash, txindex))
return error("DisconnectInputs() : UpdateTxIndex failed");
}
}
// Remove transaction from index
// This can fail if a duplicate of this transaction was in a chain that got
// reorganized away. This is only possible if this transaction was completely
// spent, so erasing it would be a no-op anyway.
txdb.EraseTxIndex(*this);
return true;
}
bool CTransaction::FetchInputs(CTxDB& txdb, const map<uint256, CTxIndex>& mapTestPool,
bool fBlock, bool fMiner, MapPrevTx& inputsRet, bool& fInvalid)
{
// FetchInputs can return false either because we just haven't seen some inputs
// (in which case the transaction should be stored as an orphan)
// or because the transaction is malformed (in which case the transaction should
// be dropped). If tx is definitely invalid, fInvalid will be set to true.
fInvalid = false;
if (IsCoinBase())
return true; // Coinbase transactions have no inputs to fetch.
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
if (inputsRet.count(prevout.hash))
continue; // Got it already
// Read txindex
CTxIndex& txindex = inputsRet[prevout.hash].first;
bool fFound = true;
if ((fBlock || fMiner) && mapTestPool.count(prevout.hash))
{
// Get txindex from current proposed changes
txindex = mapTestPool.find(prevout.hash)->second;
}
else
{
// Read txindex from txdb
fFound = txdb.ReadTxIndex(prevout.hash, txindex);
}
if (!fFound && (fBlock || fMiner))
return fMiner ? false : error("FetchInputs() : %s prev tx %s index entry not found", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
// Read txPrev
CTransaction& txPrev = inputsRet[prevout.hash].second;
if (!fFound || txindex.pos == CDiskTxPos(1,1,1))
{
// Get prev tx from single transactions in memory
{
LOCK(mempool.cs);
if (!mempool.exists(prevout.hash))
return error("FetchInputs() : %s mempool Tx prev not found %s", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
txPrev = mempool.lookup(prevout.hash);
}
if (!fFound)
txindex.vSpent.resize(txPrev.vout.size());
}
else
{
// Get prev tx from disk
if (!txPrev.ReadFromDisk(txindex.pos))
return error("FetchInputs() : %s ReadFromDisk prev tx %s failed", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
}
}
// Make sure all prevout.n's are valid:
for (unsigned int i = 0; i < vin.size(); i++)
{
const COutPoint prevout = vin[i].prevout;
assert(inputsRet.count(prevout.hash) != 0);
const CTxIndex& txindex = inputsRet[prevout.hash].first;
const CTransaction& txPrev = inputsRet[prevout.hash].second;
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
{
// Revisit this if/when transaction replacement is implemented and allows
// adding inputs:
fInvalid = true;
return DoS(100, error("FetchInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
}
}
return true;
}
const CTxOut& CTransaction::GetOutputFor(const CTxIn& input, const MapPrevTx& inputs) const
{
MapPrevTx::const_iterator mi = inputs.find(input.prevout.hash);
if (mi == inputs.end())
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.hash not found");
const CTransaction& txPrev = (mi->second).second;
if (input.prevout.n >= txPrev.vout.size())
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.n out of range");
return txPrev.vout[input.prevout.n];
}
int64 CTransaction::GetValueIn(const MapPrevTx& inputs) const
{
if (IsCoinBase())
return 0;
int64 nResult = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
nResult += GetOutputFor(vin[i], inputs).nValue;
}
return nResult;
}
unsigned int CTransaction::GetP2SHSigOpCount(const MapPrevTx& inputs) const
{
if (IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut& prevout = GetOutputFor(vin[i], inputs);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(vin[i].scriptSig);
}
return nSigOps;
}
bool CTransaction::ConnectInputs(MapPrevTx inputs,
map<uint256, CTxIndex>& mapTestPool, const CDiskTxPos& posThisTx,
const CBlockIndex* pindexBlock, bool fBlock, bool fMiner, bool fStrictPayToScriptHash)
{
// Take over previous transactions' spent pointers
// fBlock is true when this is called from AcceptBlock when a new best-block is added to the blockchain
// fMiner is true when called from the internal clevelandcoin miner
// ... both are false when called from CTransaction::AcceptToMemoryPool
if (!IsCoinBase())
{
int64 nValueIn = 0;
int64 nFees = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
assert(inputs.count(prevout.hash) > 0);
CTxIndex& txindex = inputs[prevout.hash].first;
CTransaction& txPrev = inputs[prevout.hash].second;
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
return DoS(100, error("ConnectInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
// If prev is coinbase, check that it's matured
if (txPrev.IsCoinBase())
for (const CBlockIndex* pindex = pindexBlock; pindex && pindexBlock->nHeight - pindex->nHeight < COINBASE_MATURITY; pindex = pindex->pprev)
if (pindex->nBlockPos == txindex.pos.nBlockPos && pindex->nFile == txindex.pos.nFile)
return error("ConnectInputs() : tried to spend coinbase at depth %d", pindexBlock->nHeight - pindex->nHeight);
// Check for negative or overflow input values
nValueIn += txPrev.vout[prevout.n].nValue;
if (!MoneyRange(txPrev.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
return DoS(100, error("ConnectInputs() : txin values out of range"));
}
// The first loop above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
// Helps prevent CPU exhaustion attacks.
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
assert(inputs.count(prevout.hash) > 0);
CTxIndex& txindex = inputs[prevout.hash].first;
CTransaction& txPrev = inputs[prevout.hash].second;
// Check for conflicts (double-spend)
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!txindex.vSpent[prevout.n].IsNull())
return fMiner ? false : error("ConnectInputs() : %s prev tx already used at %s", GetHash().ToString().substr(0,10).c_str(), txindex.vSpent[prevout.n].ToString().c_str());
// Skip ECDSA signature verification when connecting blocks (fBlock=true)
// before the last blockchain checkpoint. This is safe because block merkle hashes are
// still computed and checked, and any change will be caught at the next checkpoint.
if (!(fBlock && (nBestHeight < Checkpoints::GetTotalBlocksEstimate())))
{
// Verify signature
if (!VerifySignature(txPrev, *this, i, fStrictPayToScriptHash, 0))
{
// only during transition phase for P2SH: do not invoke anti-DoS code for
// potentially old clients relaying bad P2SH transactions
if (fStrictPayToScriptHash && VerifySignature(txPrev, *this, i, false, 0))
return error("ConnectInputs() : %s P2SH VerifySignature failed", GetHash().ToString().substr(0,10).c_str());
return DoS(100,error("ConnectInputs() : %s VerifySignature failed", GetHash().ToString().substr(0,10).c_str()));
}
}
// Mark outpoints as spent
txindex.vSpent[prevout.n] = posThisTx;
// Write back
if (fBlock || fMiner)
{
mapTestPool[prevout.hash] = txindex;
}
}
if (nValueIn < GetValueOut())
return DoS(100, error("ConnectInputs() : %s value in < value out", GetHash().ToString().substr(0,10).c_str()));
// Tally transaction fees
int64 nTxFee = nValueIn - GetValueOut();
if (nTxFee < 0)
return DoS(100, error("ConnectInputs() : %s nTxFee < 0", GetHash().ToString().substr(0,10).c_str()));
nFees += nTxFee;
if (!MoneyRange(nFees))
return DoS(100, error("ConnectInputs() : nFees out of range"));
}
return true;
}
bool CTransaction::ClientConnectInputs()
{
if (IsCoinBase())
return false;
// Take over previous transactions' spent pointers
{
LOCK(mempool.cs);
int64 nValueIn = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
// Get prev tx from single transactions in memory
COutPoint prevout = vin[i].prevout;
if (!mempool.exists(prevout.hash))
return false;
CTransaction& txPrev = mempool.lookup(prevout.hash);
if (prevout.n >= txPrev.vout.size())
return false;
// Verify signature
if (!VerifySignature(txPrev, *this, i, true, 0))
return error("ConnectInputs() : VerifySignature failed");
///// this is redundant with the mempool.mapNextTx stuff,
///// not sure which I want to get rid of
///// this has to go away now that posNext is gone
// // Check for conflicts
// if (!txPrev.vout[prevout.n].posNext.IsNull())
// return error("ConnectInputs() : prev tx already used");
//
// // Flag outpoints as used
// txPrev.vout[prevout.n].posNext = posThisTx;
nValueIn += txPrev.vout[prevout.n].nValue;
if (!MoneyRange(txPrev.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
return error("ClientConnectInputs() : txin values out of range");
}
if (GetValueOut() > nValueIn)
return false;
}
return true;
}
bool CBlock::DisconnectBlock(CTxDB& txdb, CBlockIndex* pindex)
{
// Disconnect in reverse order
for (int i = vtx.size()-1; i >= 0; i--)
if (!vtx[i].DisconnectInputs(txdb))
return false;
// Update block index on disk without changing it in memory.
// The memory index structure will be changed after the db commits.
if (pindex->pprev)
{
CDiskBlockIndex blockindexPrev(pindex->pprev);
blockindexPrev.hashNext = 0;
if (!txdb.WriteBlockIndex(blockindexPrev))
return error("DisconnectBlock() : WriteBlockIndex failed");
}
return true;
}
bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex)
{
// Check it again in case a previous version let a bad block in
if (!CheckBlock())
return false;
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction id's entirely.
// This rule applies to all blocks whose timestamp is after October 1, 2012, 0:00 UTC.
int64 nBIP30SwitchTime = 1349049600;
bool fEnforceBIP30 = (pindex->nTime > nBIP30SwitchTime);
// BIP16 didn't become active until October 1 2012
int64 nBIP16SwitchTime = 1349049600;
bool fStrictPayToScriptHash = (pindex->nTime >= nBIP16SwitchTime);
//// issue here: it doesn't know the version
unsigned int nTxPos = pindex->nBlockPos + ::GetSerializeSize(CBlock(), SER_DISK, CLIENT_VERSION) - 1 + GetSizeOfCompactSize(vtx.size());
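// An empty CBlock serializes the header plus a 1-byte compact size for its
// empty vtx, hence the -1 before adding the real vtx count's compact size.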
map<uint256, CTxIndex> mapQueuedChanges;
int64 nFees = 0;
unsigned int nSigOps = 0;
BOOST_FOREACH(CTransaction& tx, vtx)
{
uint256 hashTx = tx.GetHash();
if (fEnforceBIP30) {
CTxIndex txindexOld;
if (txdb.ReadTxIndex(hashTx, txindexOld)) {
BOOST_FOREACH(CDiskTxPos &pos, txindexOld.vSpent)
if (pos.IsNull())
return false;
}
}
nSigOps += tx.GetLegacySigOpCount();
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("ConnectBlock() : too many sigops"));
CDiskTxPos posThisTx(pindex->nFile, pindex->nBlockPos, nTxPos);
nTxPos += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
MapPrevTx mapInputs;
if (!tx.IsCoinBase())
{
bool fInvalid;
if (!tx.FetchInputs(txdb, mapQueuedChanges, true, false, mapInputs, fInvalid))
return false;
if (fStrictPayToScriptHash)
{
// Add in sigops done by pay-to-script-hash inputs;
// this is to prevent a "rogue miner" from creating
// an incredibly-expensive-to-validate block.
nSigOps += tx.GetP2SHSigOpCount(mapInputs);
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("ConnectBlock() : too many sigops"));
}
nFees += tx.GetValueIn(mapInputs)-tx.GetValueOut();
if (!tx.ConnectInputs(mapInputs, mapQueuedChanges, posThisTx, pindex, true, false, fStrictPayToScriptHash))
return false;
}
mapQueuedChanges[hashTx] = CTxIndex(posThisTx, tx.vout.size());
}
// Write queued txindex changes
for (map<uint256, CTxIndex>::iterator mi = mapQueuedChanges.begin(); mi != mapQueuedChanges.end(); ++mi)
{
if (!txdb.UpdateTxIndex((*mi).first, (*mi).second))
return error("ConnectBlock() : UpdateTxIndex failed");
}
if (vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees))
return false;
// Update block index on disk without changing it in memory.
// The memory index structure will be changed after the db commits.
if (pindex->pprev)
{
CDiskBlockIndex blockindexPrev(pindex->pprev);
blockindexPrev.hashNext = pindex->GetBlockHash();
if (!txdb.WriteBlockIndex(blockindexPrev))
return error("ConnectBlock() : WriteBlockIndex failed");
}
// Watch for transactions paying to me
BOOST_FOREACH(CTransaction& tx, vtx)
SyncWithWallets(tx, this, true);
return true;
}
bool static Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
{
printf("REORGANIZE\n");
// Find the fork
CBlockIndex* pfork = pindexBest;
CBlockIndex* plonger = pindexNew;
while (pfork != plonger)
{
while (plonger->nHeight > pfork->nHeight)
if (!(plonger = plonger->pprev))
return error("Reorganize() : plonger->pprev is null");
if (pfork == plonger)
break;
if (!(pfork = pfork->pprev))
return error("Reorganize() : pfork->pprev is null");
}
// List of what to disconnect
vector<CBlockIndex*> vDisconnect;
for (CBlockIndex* pindex = pindexBest; pindex != pfork; pindex = pindex->pprev)
vDisconnect.push_back(pindex);
// List of what to connect
vector<CBlockIndex*> vConnect;
for (CBlockIndex* pindex = pindexNew; pindex != pfork; pindex = pindex->pprev)
vConnect.push_back(pindex);
reverse(vConnect.begin(), vConnect.end());
printf("REORGANIZE: Disconnect %i blocks; %s..%s\n", vDisconnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexBest->GetBlockHash().ToString().substr(0,20).c_str());
printf("REORGANIZE: Connect %i blocks; %s..%s\n", vConnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->GetBlockHash().ToString().substr(0,20).c_str());
// Disconnect shorter branch
vector<CTransaction> vResurrect;
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
{
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("Reorganize() : ReadFromDisk for disconnect failed");
if (!block.DisconnectBlock(txdb, pindex))
return error("Reorganize() : DisconnectBlock %s failed", pindex->GetBlockHash().ToString().substr(0,20).c_str());
// Queue memory transactions to resurrect
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!tx.IsCoinBase())
vResurrect.push_back(tx);
}
// Connect longer branch
vector<CTransaction> vDelete;
for (unsigned int i = 0; i < vConnect.size(); i++)
{
CBlockIndex* pindex = vConnect[i];
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("Reorganize() : ReadFromDisk for connect failed");
if (!block.ConnectBlock(txdb, pindex))
{
// Invalid block
return error("Reorganize() : ConnectBlock %s failed", pindex->GetBlockHash().ToString().substr(0,20).c_str());
}
// Queue memory transactions to delete
BOOST_FOREACH(const CTransaction& tx, block.vtx)
vDelete.push_back(tx);
}
if (!txdb.WriteHashBestChain(pindexNew->GetBlockHash()))
return error("Reorganize() : WriteHashBestChain failed");
// Make sure it's successfully written to disk before changing memory structure
if (!txdb.TxnCommit())
return error("Reorganize() : TxnCommit failed");
// Disconnect shorter branch
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
if (pindex->pprev)
pindex->pprev->pnext = NULL;
// Connect longer branch
BOOST_FOREACH(CBlockIndex* pindex, vConnect)
if (pindex->pprev)
pindex->pprev->pnext = pindex;
// Resurrect memory transactions that were in the disconnected branch
BOOST_FOREACH(CTransaction& tx, vResurrect)
tx.AcceptToMemoryPool(txdb, false);
// Delete redundant memory transactions that are in the connected branch
BOOST_FOREACH(CTransaction& tx, vDelete)
mempool.remove(tx);
printf("REORGANIZE: done\n");
return true;
}
// Called from inside SetBestChain: attaches a block to the new best chain being built
bool CBlock::SetBestChainInner(CTxDB& txdb, CBlockIndex *pindexNew)
{
uint256 hash = GetHash();
// Adding to current best branch
if (!ConnectBlock(txdb, pindexNew) || !txdb.WriteHashBestChain(hash))
{
txdb.TxnAbort();
InvalidChainFound(pindexNew);
return false;
}
if (!txdb.TxnCommit())
return error("SetBestChain() : TxnCommit failed");
// Add to current best branch
pindexNew->pprev->pnext = pindexNew;
// Delete redundant memory transactions
BOOST_FOREACH(CTransaction& tx, vtx)
mempool.remove(tx);
return true;
}
bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew)
{
uint256 hash = GetHash();
if (!txdb.TxnBegin())
return error("SetBestChain() : TxnBegin failed");
if (pindexGenesisBlock == NULL && hash == hashGenesisBlock)
{
txdb.WriteHashBestChain(hash);
if (!txdb.TxnCommit())
return error("SetBestChain() : TxnCommit failed");
pindexGenesisBlock = pindexNew;
}
else if (hashPrevBlock == hashBestChain)
{
if (!SetBestChainInner(txdb, pindexNew))
return error("SetBestChain() : SetBestChainInner failed");
}
else
{
// the first block in the new chain that will cause it to become the new best chain
CBlockIndex *pindexIntermediate = pindexNew;
// list of blocks that need to be connected afterwards
std::vector<CBlockIndex*> vpindexSecondary;
// Reorganize is costly in terms of db load, as it works in a single db transaction.
// Try to limit how much needs to be done inside
while (pindexIntermediate->pprev && pindexIntermediate->pprev->bnChainWork > pindexBest->bnChainWork)
{
vpindexSecondary.push_back(pindexIntermediate);
pindexIntermediate = pindexIntermediate->pprev;
}
if (!vpindexSecondary.empty())
printf("Postponing %i reconnects\n", vpindexSecondary.size());
// Switch to new best branch
if (!Reorganize(txdb, pindexIntermediate))
{
txdb.TxnAbort();
InvalidChainFound(pindexNew);
return error("SetBestChain() : Reorganize failed");
}
// Connect further blocks
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vpindexSecondary)
{
CBlock block;
if (!block.ReadFromDisk(pindex))
{
printf("SetBestChain() : ReadFromDisk failed\n");
break;
}
if (!txdb.TxnBegin()) {
printf("SetBestChain() : TxnBegin 2 failed\n");
break;
}
// Errors from here on are not fatal: we already reorganised to a new chain in a valid way
if (!block.SetBestChainInner(txdb, pindex))
break;
}
}
// Update best block in wallet (so we can detect restored wallets)
bool fIsInitialDownload = IsInitialBlockDownload();
if (!fIsInitialDownload)
{
const CBlockLocator locator(pindexNew);
::SetBestChain(locator);
}
// New best block
hashBestChain = hash;
pindexBest = pindexNew;
nBestHeight = pindexBest->nHeight;
bnBestChainWork = pindexNew->bnChainWork;
nTimeBestReceived = GetTime();
nTransactionsUpdated++;
printf("SetBestChain: new best=%s height=%d work=%s date=%s\n",
hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, bnBestChainWork.ToString().c_str(),
DateTimeStrFormat("%x %H:%M:%S", pindexBest->GetBlockTime()).c_str());
// Check the version of the last 100 blocks to see if we need to upgrade:
if (!fIsInitialDownload)
{
int nUpgraded = 0;
const CBlockIndex* pindex = pindexBest;
for (int i = 0; i < 100 && pindex != NULL; i++)
{
if (pindex->nVersion > CBlock::CURRENT_VERSION)
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
printf("SetBestChain: %d of last 100 blocks above version %d\n", nUpgraded, CBlock::CURRENT_VERSION);
// if (nUpgraded > 100/2)
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
// strMiscWarning = _("Warning: this version is obsolete, upgrade required");
}
std::string strCmd = GetArg("-blocknotify", "");
if (!fIsInitialDownload && !strCmd.empty())
{
boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
return true;
}
bool CBlock::AddToBlockIndex(unsigned int nFile, unsigned int nBlockPos)
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return error("AddToBlockIndex() : %s already exists", hash.ToString().substr(0,20).c_str());
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(nFile, nBlockPos, *this);
if (!pindexNew)
return error("AddToBlockIndex() : new CBlockIndex failed");
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
map<uint256, CBlockIndex*>::iterator miPrev = mapBlockIndex.find(hashPrevBlock);
if (miPrev != mapBlockIndex.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
}
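// Cumulative chain work decides the best chain: parent's total plus this block's own work.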
pindexNew->bnChainWork = (pindexNew->pprev ? pindexNew->pprev->bnChainWork : 0) + pindexNew->GetBlockWork();
CTxDB txdb;
if (!txdb.TxnBegin())
return false;
txdb.WriteBlockIndex(CDiskBlockIndex(pindexNew));
if (!txdb.TxnCommit())
return false;
// New best
if (pindexNew->bnChainWork > bnBestChainWork)
if (!SetBestChain(txdb, pindexNew))
return false;
txdb.Close();
if (pindexNew == pindexBest)
{
// Notify UI to display prev block's coinbase if it was ours
static uint256 hashPrevBestCoinBase;
UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = vtx[0].GetHash();
}
uiInterface.NotifyBlocksChanged();
return true;
}
bool CBlock::CheckBlock() const
{
// These are checks that are independent of context
// that can be verified before saving an orphan block.
// Size limits
if (vtx.empty() || vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return DoS(100, error("CheckBlock() : size limits failed"));
// Special short-term limits to avoid 10,000 BDB lock limit:
if (GetBlockTime() < 1376568000) // stop enforcing 15 August 2013 noon GMT
{
// Rule is: #unique txids referenced <= 4,500
// ... to prevent 10,000 BDB lock exhaustion on old clients
set<uint256> setTxIn;
for (size_t i = 0; i < vtx.size(); i++)
{
setTxIn.insert(vtx[i].GetHash());
if (i == 0) continue; // skip coinbase txin
BOOST_FOREACH(const CTxIn& txin, vtx[i].vin)
setTxIn.insert(txin.prevout.hash);
}
size_t nTxids = setTxIn.size();
if (nTxids > 4500)
return error("CheckBlock() : 15 Aug maxlocks violation");
}
// Check proof of work matches claimed amount
if (!CheckProofOfWork(GetPoWHash(), nBits))
return DoS(50, error("CheckBlock() : proof of work failed"));
// Check timestamp
if (GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return error("CheckBlock() : block timestamp too far in the future");
// First transaction must be coinbase, the rest must not be
if (vtx.empty() || !vtx[0].IsCoinBase())
return DoS(100, error("CheckBlock() : first tx is not coinbase"));
for (unsigned int i = 1; i < vtx.size(); i++)
if (vtx[i].IsCoinBase())
return DoS(100, error("CheckBlock() : more than one coinbase"));
// Check transactions
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.CheckTransaction())
return DoS(tx.nDoS, error("CheckBlock() : CheckTransaction failed"));
// Check for duplicate txids. This is caught by ConnectInputs(),
// but catching it earlier avoids a potential DoS attack:
set<uint256> uniqueTx;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
uniqueTx.insert(tx.GetHash());
}
if (uniqueTx.size() != vtx.size())
return DoS(100, error("CheckBlock() : duplicate transaction"));
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
nSigOps += tx.GetLegacySigOpCount();
}
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"));
// Check merkleroot
if (hashMerkleRoot != BuildMerkleTree())
return DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"));
return true;
}
bool CBlock::AcceptBlock()
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return error("AcceptBlock() : block already in mapBlockIndex");
// Get prev block index
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
if (mi == mapBlockIndex.end())
return DoS(10, error("AcceptBlock() : prev block not found"));
CBlockIndex* pindexPrev = (*mi).second;
int nHeight = pindexPrev->nHeight+1;
// Check proof of work
if (nBits != GetNextWorkRequired(pindexPrev, this))
return DoS(100, error("AcceptBlock() : incorrect proof of work"));
// Check timestamp against prev
if (GetBlockTime() <= pindexPrev->GetMedianTimePast())
return error("AcceptBlock() : block's timestamp is too early");
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.IsFinal(nHeight, GetBlockTime()))
return DoS(10, error("AcceptBlock() : contains a non-final transaction"));
// Check that the block chain matches the known block chain up to a checkpoint
if (!Checkpoints::CheckBlock(nHeight, hash))
return DoS(100, error("AcceptBlock() : rejected by checkpoint lockin at %d", nHeight));
// Write block to history file
if (!CheckDiskSpace(::GetSerializeSize(*this, SER_DISK, CLIENT_VERSION)))
return error("AcceptBlock() : out of disk space");
unsigned int nFile = -1;
unsigned int nBlockPos = 0;
if (!WriteToDisk(nFile, nBlockPos))
return error("AcceptBlock() : WriteToDisk failed");
if (!AddToBlockIndex(nFile, nBlockPos))
return error("AcceptBlock() : AddToBlockIndex failed");
// Relay inventory, but don't relay old inventory during initial block download
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
if (hashBestChain == hash)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
pnode->PushInventory(CInv(MSG_BLOCK, hash));
}
return true;
}
bool ProcessBlock(CNode* pfrom, CBlock* pblock)
{
// Check for duplicate
uint256 hash = pblock->GetHash();
if (mapBlockIndex.count(hash))
return error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString().substr(0,20).c_str());
if (mapOrphanBlocks.count(hash))
return error("ProcessBlock() : already have block (orphan) %s", hash.ToString().substr(0,20).c_str());
// Preliminary checks
if (!pblock->CheckBlock())
return error("ProcessBlock() : CheckBlock FAILED");
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(mapBlockIndex);
if (pcheckpoint && pblock->hashPrevBlock != hashBestChain)
{
// Extra checks to prevent "fill up memory by spamming with bogus blocks"
int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
if (deltaTime < 0)
{
if (pfrom)
pfrom->Misbehaving(100);
return error("ProcessBlock() : block with timestamp before last checkpoint");
}
CBigNum bnNewBlock;
bnNewBlock.SetCompact(pblock->nBits);
CBigNum bnRequired;
bnRequired.SetCompact(ComputeMinWork(pcheckpoint->nBits, deltaTime));
if (bnNewBlock > bnRequired)
{
if (pfrom)
pfrom->Misbehaving(100);
return error("ProcessBlock() : block with too little proof-of-work");
}
}
// If we don't already have its previous block, shunt it off to the holding area until we get it
if (!mapBlockIndex.count(pblock->hashPrevBlock))
{
printf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString().substr(0,20).c_str());
CBlock* pblock2 = new CBlock(*pblock);
mapOrphanBlocks.insert(make_pair(hash, pblock2));
mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));
// Ask this guy to fill in what we're missing
if (pfrom)
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(pblock2));
return true;
}
// Store to disk
if (!pblock->AcceptBlock())
return error("ProcessBlock() : AcceptBlock FAILED");
// Recursively process any orphan blocks that depended on this one
vector<uint256> vWorkQueue;
vWorkQueue.push_back(hash);
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
uint256 hashPrev = vWorkQueue[i];
for (multimap<uint256, CBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
++mi)
{
CBlock* pblockOrphan = (*mi).second;
if (pblockOrphan->AcceptBlock())
vWorkQueue.push_back(pblockOrphan->GetHash());
mapOrphanBlocks.erase(pblockOrphan->GetHash());
delete pblockOrphan;
}
mapOrphanBlocksByPrev.erase(hashPrev);
}
printf("ProcessBlock: ACCEPTED\n");
return true;
}
bool CheckDiskSpace(uint64 nAdditionalBytes)
{
uint64 nFreeBytesAvailable = filesystem::space(GetDataDir()).available;
// Check for nMinDiskSpace bytes (currently 50MB)
if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
{
fShutdown = true;
string strMessage = _("Warning: Disk space is low");
strMiscWarning = strMessage;
printf("*** %s\n", strMessage.c_str());
uiInterface.ThreadSafeMessageBox(strMessage, "clevelandcoin", CClientUIInterface::OK | CClientUIInterface::ICON_EXCLAMATION | CClientUIInterface::MODAL);
StartShutdown();
return false;
}
return true;
}
FILE* OpenBlockFile(unsigned int nFile, unsigned int nBlockPos, const char* pszMode)
{
if ((nFile < 1) || (nFile == (unsigned int) -1))
return NULL;
FILE* file = fopen((GetDataDir() / strprintf("blk%04d.dat", nFile)).string().c_str(), pszMode);
if (!file)
return NULL;
if (nBlockPos != 0 && !strchr(pszMode, 'a') && !strchr(pszMode, 'w'))
{
if (fseek(file, nBlockPos, SEEK_SET) != 0)
{
fclose(file);
return NULL;
}
}
return file;
}
static unsigned int nCurrentBlockFile = 1;
FILE* AppendBlockFile(unsigned int& nFileRet)
{
nFileRet = 0;
loop
{
FILE* file = OpenBlockFile(nCurrentBlockFile, 0, "ab");
if (!file)
return NULL;
if (fseek(file, 0, SEEK_END) != 0)
return NULL;
// FAT32 filesize max 4GB, fseek and ftell max 2GB, so we must stay under 2GB
if (ftell(file) < 0x7F000000 - MAX_SIZE)
{
nFileRet = nCurrentBlockFile;
return file;
}
fclose(file);
nCurrentBlockFile++;
}
}
bool LoadBlockIndex(bool fAllowNew)
{
if (fTestNet)
{
pchMessageStart[0] = 0xfc;
pchMessageStart[1] = 0xc1;
pchMessageStart[2] = 0xb7;
pchMessageStart[3] = 0xdc;
hashGenesisBlock = uint256("0xf5ae71e26c74beacc88382716aced69cddf3dffff24f384e1808905e0188f68f");
}
//
// Load block index
//
CTxDB txdb("cr");
if (!txdb.LoadBlockIndex())
return false;
txdb.Close();
//
// Init with genesis block
//
if (mapBlockIndex.empty())
{
if (!fAllowNew)
return false;
        // Genesis block
        // (The dump below is the upstream Litecoin genesis block, retained
        // only as a format reference; clevelandcoin's own genesis parameters
        // are set in the code that follows.)
        // CBlock(hash=12a765e31ffd4059bada, PoW=0000050c34a64b415b6b, ver=1, hashPrevBlock=00000000000000000000, hashMerkleRoot=97ddfbbae6, nTime=1317972665, nBits=1e0ffff0, nNonce=2084524493, vtx=1)
        //   CTransaction(hash=97ddfbbae6, ver=1, vin.size=1, vout.size=1, nLockTime=0)
        //     CTxIn(COutPoint(0000000000, -1), coinbase 04ffff001d0104404e592054696d65732030352f4f63742f32303131205374657665204a6f62732c204170706c65e280997320566973696f6e6172792c2044696573206174203536)
        //     CTxOut(nValue=50.00000000, scriptPubKey=040184710fa689ad5023690c80f3a4)
        //   vMerkleTree: 97ddfbbae6
const char* pszTimestamp = "Cleveland coin birth";
CTransaction txNew;
txNew.vin.resize(1);
txNew.vout.resize(1);
txNew.vin[0].scriptSig = CScript() << 486604799 << CBigNum(4) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
txNew.vout[0].nValue = 50 * COIN;
txNew.vout[0].scriptPubKey = CScript() << ParseHex("040184710fa689ad5023690c80f3a49c8f13f8d45b8c857fbcbc8bc4a8e4d3eb4b10f4d4604fa08dce601aaf0f470216fe1b51850b4acf21b179c45070ac7b03a9") << OP_CHECKSIG;
CBlock block;
block.vtx.push_back(txNew);
block.hashPrevBlock = 0;
block.hashMerkleRoot = block.BuildMerkleTree();
block.nVersion = 1;
block.nTime = 1389575440;
block.nBits = 0x1e0ffff0;
block.nNonce = 387689626;
if (fTestNet)
{
block.nTime = 1317798646;
block.nNonce = 385270584;
}
//// debug print
printf("%s\n", block.GetHash().ToString().c_str());
printf("%s\n", hashGenesisBlock.ToString().c_str());
printf("%s\n", block.hashMerkleRoot.ToString().c_str());
assert(block.hashMerkleRoot == uint256("0xa02bf4cab7b33fc23a2d4cf4dada496887bf903b3f65a9bc67f346bb5204d67f"));
        // If the genesis block hash does not match, mine a new genesis block.
        if (block.GetHash() != hashGenesisBlock)
{
printf("Searching for genesis block...\n");
// This will figure out a valid hash and Nonce if you're
// creating a different genesis block:
uint256 hashTarget = CBigNum().SetCompact(block.nBits).getuint256();
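            // Compact-bits refresher: target = mantissa * 2^(8 * (exponent - 3)).
            // Here nBits = 0x1e0ffff0 gives exponent 0x1e and mantissa 0x0ffff0,
            // i.e. a target of 0x0ffff0 shifted left by 8 * (0x1e - 3) = 216 bits.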
uint256 thash;
char scratchpad[SCRYPT_SCRATCHPAD_SIZE];
loop
{
scrypt_1024_1_1_256_sp(BEGIN(block.nVersion), BEGIN(thash), scratchpad);
if (thash <= hashTarget)
break;
if ((block.nNonce & 0xFFF) == 0)
{
printf("nonce %08X: hash = %s (target = %s)\n", block.nNonce, thash.ToString().c_str(), hashTarget.ToString().c_str());
}
++block.nNonce;
if (block.nNonce == 0)
{
printf("NONCE WRAPPED, incrementing time\n");
++block.nTime;
}
}
printf("block.nTime = %u \n", block.nTime);
printf("block.nNonce = %u \n", block.nNonce);
printf("block.GetHash = %s\n", block.GetHash().ToString().c_str());
}
block.print();
assert(block.GetHash() == hashGenesisBlock);
// Start new block file
unsigned int nFile;
unsigned int nBlockPos;
if (!block.WriteToDisk(nFile, nBlockPos))
return error("LoadBlockIndex() : writing genesis block to disk failed");
if (!block.AddToBlockIndex(nFile, nBlockPos))
return error("LoadBlockIndex() : genesis block not accepted");
}
return true;
}
void PrintBlockTree()
{
// precompute tree structure
map<CBlockIndex*, vector<CBlockIndex*> > mapNext;
for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi)
{
CBlockIndex* pindex = (*mi).second;
mapNext[pindex->pprev].push_back(pindex);
// test
//while (rand() % 3 == 0)
// mapNext[pindex->pprev].push_back(pindex);
}
vector<pair<int, CBlockIndex*> > vStack;
vStack.push_back(make_pair(0, pindexGenesisBlock));
int nPrevCol = 0;
while (!vStack.empty())
{
int nCol = vStack.back().first;
CBlockIndex* pindex = vStack.back().second;
vStack.pop_back();
// print split or gap
if (nCol > nPrevCol)
{
for (int i = 0; i < nCol-1; i++)
printf("| ");
printf("|\\\n");
}
else if (nCol < nPrevCol)
{
for (int i = 0; i < nCol; i++)
printf("| ");
printf("|\n");
}
nPrevCol = nCol;
// print columns
for (int i = 0; i < nCol; i++)
printf("| ");
// print item
CBlock block;
block.ReadFromDisk(pindex);
printf("%d (%u,%u) %s %s tx %d",
pindex->nHeight,
pindex->nFile,
pindex->nBlockPos,
block.GetHash().ToString().substr(0,20).c_str(),
DateTimeStrFormat("%x %H:%M:%S", block.GetBlockTime()).c_str(),
block.vtx.size());
PrintWallets(block);
// put the main timechain first
vector<CBlockIndex*>& vNext = mapNext[pindex];
for (unsigned int i = 0; i < vNext.size(); i++)
{
if (vNext[i]->pnext)
{
swap(vNext[0], vNext[i]);
break;
}
}
// iterate children
for (unsigned int i = 0; i < vNext.size(); i++)
vStack.push_back(make_pair(nCol+i, vNext[i]));
}
}
bool LoadExternalBlockFile(FILE* fileIn)
{
int nLoaded = 0;
{
LOCK(cs_main);
try {
CAutoFile blkdat(fileIn, SER_DISK, CLIENT_VERSION);
unsigned int nPos = 0;
while (nPos != (unsigned int)-1 && blkdat.good() && !fRequestShutdown)
{
unsigned char pchData[65536];
do {
fseek(blkdat, nPos, SEEK_SET);
int nRead = fread(pchData, 1, sizeof(pchData), blkdat);
if (nRead <= 8)
{
nPos = (unsigned int)-1;
break;
}
void* nFind = memchr(pchData, pchMessageStart[0], nRead+1-sizeof(pchMessageStart));
if (nFind)
{
if (memcmp(nFind, pchMessageStart, sizeof(pchMessageStart))==0)
{
nPos += ((unsigned char*)nFind - pchData) + sizeof(pchMessageStart);
break;
}
nPos += ((unsigned char*)nFind - pchData) + 1;
}
else
nPos += sizeof(pchData) - sizeof(pchMessageStart) + 1;
} while(!fRequestShutdown);
if (nPos == (unsigned int)-1)
break;
fseek(blkdat, nPos, SEEK_SET);
unsigned int nSize;
blkdat >> nSize;
if (nSize > 0 && nSize <= MAX_BLOCK_SIZE)
{
CBlock block;
blkdat >> block;
if (ProcessBlock(NULL,&block))
{
nLoaded++;
nPos += 4 + nSize;
}
}
}
}
catch (std::exception &e) {
printf("%s() : Deserialize or I/O error caught during load\n",
__PRETTY_FUNCTION__);
}
}
printf("Loaded %i blocks from external file\n", nLoaded);
return nLoaded > 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//
map<uint256, CAlert> mapAlerts;
CCriticalSection cs_mapAlerts;
string GetWarnings(string strFor)
{
int nPriority = 0;
string strStatusBar;
string strRPC;
if (GetBoolArg("-testsafemode"))
strRPC = "test";
// Misc warnings like out of disk space and clock is wrong
if (strMiscWarning != "")
{
nPriority = 1000;
strStatusBar = strMiscWarning;
}
    // Longer invalid proof-of-work chain: warn when a rejected chain carries more than ~6 blocks' extra work over ours
if (pindexBest && bnBestInvalidWork > bnBestChainWork + pindexBest->GetBlockWork() * 6)
{
nPriority = 2000;
strStatusBar = strRPC = "WARNING: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.";
}
// Alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.AppliesToMe() && alert.nPriority > nPriority)
{
nPriority = alert.nPriority;
strStatusBar = alert.strStatusBar;
}
}
}
if (strFor == "statusbar")
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
assert(!"GetWarnings() : invalid parameter");
return "error";
}
CAlert CAlert::getAlertByHash(const uint256 &hash)
{
CAlert retval;
{
LOCK(cs_mapAlerts);
map<uint256, CAlert>::iterator mi = mapAlerts.find(hash);
if(mi != mapAlerts.end())
retval = mi->second;
}
return retval;
}
bool CAlert::ProcessAlert()
{
if (!CheckSignature())
return false;
if (!IsInEffect())
return false;
{
LOCK(cs_mapAlerts);
// Cancel previous alerts
for (map<uint256, CAlert>::iterator mi = mapAlerts.begin(); mi != mapAlerts.end();)
{
const CAlert& alert = (*mi).second;
if (Cancels(alert))
{
printf("cancelling alert %d\n", alert.nID);
uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED);
mapAlerts.erase(mi++);
}
else if (!alert.IsInEffect())
{
printf("expiring alert %d\n", alert.nID);
uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED);
mapAlerts.erase(mi++);
}
else
mi++;
}
// Check if this alert has been cancelled
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.Cancels(*this))
{
printf("alert already cancelled by %d\n", alert.nID);
return false;
}
}
// Add to mapAlerts
mapAlerts.insert(make_pair(GetHash(), *this));
// Notify UI if it applies to me
if(AppliesToMe())
uiInterface.NotifyAlertChanged(GetHash(), CT_NEW);
}
printf("accepted alert %d, AppliesToMe()=%d\n", nID, AppliesToMe());
return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(CTxDB& txdb, const CInv& inv)
{
switch (inv.type)
{
case MSG_TX:
{
bool txInMap = false;
{
LOCK(mempool.cs);
txInMap = (mempool.exists(inv.hash));
}
return txInMap ||
mapOrphanTransactions.count(inv.hash) ||
txdb.ContainsTx(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash) ||
mapOrphanBlocks.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ascii, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
unsigned char pchMessageStart[4] = { 0xfb, 0xc0, 0xb6, 0xdb }; // clevelandcoin: increase each by adding 2 to bitcoin's value.
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
{
static map<CService, CPubKey> mapReuseKey;
RandAddSeedPerfmon();
if (fDebug)
printf("received: %s (%d bytes)\n", strCommand.c_str(), vRecv.size());
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
printf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (strCommand == "version")
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
pfrom->Misbehaving(1);
return false;
}
int64 nTime;
CAddress addrMe;
CAddress addrFrom;
uint64 nNonce = 1;
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
if (pfrom->nVersion < MIN_PROTO_VERSION)
{
            // Since February 20, 2012 the minimum supported protocol version
            // has been 209; disconnect peers running anything older
printf("partner %s using obsolete version %i; disconnecting\n", pfrom->addr.ToString().c_str(), pfrom->nVersion);
pfrom->fDisconnect = true;
return false;
}
if (pfrom->nVersion == 10300)
pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty())
vRecv >> pfrom->strSubVer;
if (!vRecv.empty())
vRecv >> pfrom->nStartingHeight;
if (pfrom->fInbound && addrMe.IsRoutable())
{
pfrom->addrLocal = addrMe;
SeenLocal(addrMe);
}
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
{
printf("connected to self at %s, disconnecting\n", pfrom->addr.ToString().c_str());
pfrom->fDisconnect = true;
return true;
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
pfrom->PushVersion();
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
AddTimeData(pfrom->addr, nTime);
// Change version
pfrom->PushMessage("verack");
pfrom->vSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
// Advertise our address
if (!fNoListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr);
if (addr.IsRoutable())
pfrom->PushAddress(addr);
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
{
pfrom->PushMessage("getaddr");
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}
// Ask the first connected node for block updates
static int nAskedForBlocks = 0;
if (!pfrom->fClient && !pfrom->fOneShot &&
(pfrom->nVersion < NOBLKS_VERSION_START ||
pfrom->nVersion >= NOBLKS_VERSION_END) &&
(nAskedForBlocks < 1 || vNodes.size() <= 1))
{
nAskedForBlocks++;
pfrom->PushGetBlocks(pindexBest, uint256(0));
}
// Relay alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
item.second.RelayTo(pfrom);
}
pfrom->fSuccessfullyConnected = true;
printf("receive version message: version %d, blocks=%d, us=%s, them=%s, peer=%s\n", pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString().c_str(), addrFrom.ToString().c_str(), pfrom->addr.ToString().c_str());
cPeerBlockCounts.input(pfrom->nStartingHeight);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
pfrom->Misbehaving(1);
return false;
}
else if (strCommand == "verack")
{
pfrom->vRecv.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
}
else if (strCommand == "addr")
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
return true;
if (vAddr.size() > 1000)
{
pfrom->Misbehaving(20);
return error("message addr size() = %d", vAddr.size());
}
// Store the new addresses
vector<CAddress> vAddrOk;
int64 nNow = GetAdjustedTime();
int64 nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
if (fShutdown)
return true;
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the setAddrKnowns of the chosen nodes prevent repeats
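                    // The day-bucket term below changes once per 24 hours (at a
                    // per-address offset, since hashAddr is mixed into the time),
                    // which is what rotates the chosen relay set each day.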
static uint256 hashSalt;
if (hashSalt == 0)
hashSalt = GetRandHash();
uint64 hashAddr = addr.GetHash();
uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
{
if (pnode->nVersion < CADDR_TIME_VERSION)
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = hashRand ^ nPointer;
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
((*mi).second)->PushAddress(addr);
}
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == "inv")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > 50000)
{
pfrom->Misbehaving(20);
return error("message inv size() = %d", vInv.size());
}
// find last block in inv vector
unsigned int nLastBlock = (unsigned int)(-1);
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) {
if (vInv[vInv.size() - 1 - nInv].type == MSG_BLOCK) {
nLastBlock = vInv.size() - 1 - nInv;
break;
}
}
CTxDB txdb("r");
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
if (fShutdown)
return true;
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(txdb, inv);
if (fDebug)
printf(" got inventory: %s %s\n", inv.ToString().c_str(), fAlreadyHave ? "have" : "new");
if (!fAlreadyHave)
pfrom->AskFor(inv);
else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));
} else if (nInv == nLastBlock) {
// In case we are on a very long side-chain, it is possible that we already have
// the last block in an inv bundle sent in response to getblocks. Try to detect
// this situation and push another getblocks to continue.
                pfrom->PushGetBlocks(mapBlockIndex[inv.hash], uint256(0));
if (fDebug)
printf("force request: %s\n", inv.ToString().c_str());
}
// Track requests for our stuff
Inventory(inv.hash);
}
}
else if (strCommand == "getdata")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > 50000)
{
pfrom->Misbehaving(20);
return error("message getdata size() = %d", vInv.size());
}
if (fDebugNet || (vInv.size() != 1))
printf("received getdata (%d invsz)\n", vInv.size());
BOOST_FOREACH(const CInv& inv, vInv)
{
if (fShutdown)
return true;
if (fDebugNet || (vInv.size() == 1))
printf("received getdata for: %s\n", inv.ToString().c_str());
if (inv.type == MSG_BLOCK)
{
// Send block from disk
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
CBlock block;
block.ReadFromDisk((*mi).second);
pfrom->PushMessage("block", block);
// Trigger them to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, hashBestChain));
pfrom->PushMessage("inv", vInv);
pfrom->hashContinue = 0;
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end())
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
}
}
// Track requests for our stuff
Inventory(inv.hash);
}
}
else if (strCommand == "getblocks")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
// Find the last block the caller has in the main chain
CBlockIndex* pindex = locator.GetBlockIndex();
// Send the rest of the chain
if (pindex)
pindex = pindex->pnext;
int nLimit = 500;
printf("getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str(), nLimit);
for (; pindex; pindex = pindex->pnext)
{
if (pindex->GetBlockHash() == hashStop)
{
printf(" getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll make them
// getblocks the next batch of inventory.
printf(" getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == "getheaders")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = locator.GetBlockIndex();
if (pindex)
pindex = pindex->pnext;
}
vector<CBlock> vHeaders;
int nLimit = 2000;
printf("getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str());
for (; pindex; pindex = pindex->pnext)
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
pfrom->PushMessage("headers", vHeaders);
}
else if (strCommand == "tx")
{
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CDataStream vMsg(vRecv);
CTxDB txdb("r");
CTransaction tx;
vRecv >> tx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
// Truncate messages to the size of the tx in them
unsigned int nSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
unsigned int oldSize = vMsg.size();
if (nSize < oldSize) {
vMsg.resize(nSize);
printf("truncating oversized TX %s (%u -> %u)\n",
tx.GetHash().ToString().c_str(),
oldSize, nSize);
}
bool fMissingInputs = false;
if (tx.AcceptToMemoryPool(txdb, true, &fMissingInputs))
{
SyncWithWallets(tx, NULL, true);
RelayMessage(inv, vMsg);
mapAlreadyAskedFor.erase(inv);
vWorkQueue.push_back(inv.hash);
vEraseQueue.push_back(inv.hash);
// Recursively process any orphan transactions that depended on this one
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
uint256 hashPrev = vWorkQueue[i];
for (map<uint256, CDataStream*>::iterator mi = mapOrphanTransactionsByPrev[hashPrev].begin();
mi != mapOrphanTransactionsByPrev[hashPrev].end();
++mi)
{
const CDataStream& vMsg = *((*mi).second);
CTransaction tx;
CDataStream(vMsg) >> tx;
CInv inv(MSG_TX, tx.GetHash());
bool fMissingInputs2 = false;
if (tx.AcceptToMemoryPool(txdb, true, &fMissingInputs2))
{
printf(" accepted orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
SyncWithWallets(tx, NULL, true);
RelayMessage(inv, vMsg);
mapAlreadyAskedFor.erase(inv);
vWorkQueue.push_back(inv.hash);
vEraseQueue.push_back(inv.hash);
}
else if (!fMissingInputs2)
{
// invalid orphan
vEraseQueue.push_back(inv.hash);
printf(" removed invalid orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
}
}
}
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
else if (fMissingInputs)
{
AddOrphanTx(vMsg);
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nEvicted = LimitOrphanTxSize(MAX_ORPHAN_TRANSACTIONS);
if (nEvicted > 0)
printf("mapOrphan overflow, removed %u tx\n", nEvicted);
}
if (tx.nDoS) pfrom->Misbehaving(tx.nDoS);
}
else if (strCommand == "block")
{
CBlock block;
vRecv >> block;
printf("received block %s\n", block.GetHash().ToString().substr(0,20).c_str());
// block.print();
CInv inv(MSG_BLOCK, block.GetHash());
pfrom->AddInventoryKnown(inv);
if (ProcessBlock(pfrom, &block))
mapAlreadyAskedFor.erase(inv);
if (block.nDoS) pfrom->Misbehaving(block.nDoS);
}
else if (strCommand == "getaddr")
{
pfrom->vAddrToSend.clear();
vector<CAddress> vAddr = addrman.GetAddr();
BOOST_FOREACH(const CAddress &addr, vAddr)
pfrom->PushAddress(addr);
}
else if (strCommand == "checkorder")
{
uint256 hashReply;
vRecv >> hashReply;
if (!GetBoolArg("-allowreceivebyip"))
{
pfrom->PushMessage("reply", hashReply, (int)2, string(""));
return true;
}
CWalletTx order;
vRecv >> order;
/// we have a chance to check the order here
// Keep giving the same key to the same ip until they use it
if (!mapReuseKey.count(pfrom->addr))
pwalletMain->GetKeyFromPool(mapReuseKey[pfrom->addr], true);
// Send back approval of order and pubkey to use
CScript scriptPubKey;
scriptPubKey << mapReuseKey[pfrom->addr] << OP_CHECKSIG;
pfrom->PushMessage("reply", hashReply, (int)0, scriptPubKey);
}
else if (strCommand == "reply")
{
uint256 hashReply;
vRecv >> hashReply;
CRequestTracker tracker;
{
LOCK(pfrom->cs_mapRequests);
map<uint256, CRequestTracker>::iterator mi = pfrom->mapRequests.find(hashReply);
if (mi != pfrom->mapRequests.end())
{
tracker = (*mi).second;
pfrom->mapRequests.erase(mi);
}
}
if (!tracker.IsNull())
tracker.fn(tracker.param1, vRecv);
}
else if (strCommand == "ping")
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64 nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
pfrom->PushMessage("pong", nonce);
}
}
else if (strCommand == "alert")
{
CAlert alert;
vRecv >> alert;
if (alert.ProcessAlert())
{
// Relay
pfrom->setKnown.insert(alert.GetHash());
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
alert.RelayTo(pnode);
}
}
}
else
{
// Ignore unknown commands for extensibility
}
// Update the last seen time for this node's address
if (pfrom->fNetworkNode)
if (strCommand == "version" || strCommand == "addr" || strCommand == "inv" || strCommand == "getdata" || strCommand == "ping")
AddressCurrentlyConnected(pfrom->addr);
return true;
}
bool ProcessMessages(CNode* pfrom)
{
CDataStream& vRecv = pfrom->vRecv;
if (vRecv.empty())
return true;
//if (fDebug)
// printf("ProcessMessages(%u bytes)\n", vRecv.size());
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
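    // For instance, a payload-less "verack" on this network is the 24 bytes:
    // fb c0 b6 db | "verack" padded with NULs to 12 bytes | 00 00 00 00 |
    // 5d f6 e0 e2 (the first four bytes of SHA256d of the empty payload).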
loop
{
// Don't bother if send buffer is too full to respond anyway
if (pfrom->vSend.size() >= SendBufferSize())
break;
// Scan for message start
CDataStream::iterator pstart = search(vRecv.begin(), vRecv.end(), BEGIN(pchMessageStart), END(pchMessageStart));
int nHeaderSize = vRecv.GetSerializeSize(CMessageHeader());
if (vRecv.end() - pstart < nHeaderSize)
{
if ((int)vRecv.size() > nHeaderSize)
{
printf("\n\nPROCESSMESSAGE MESSAGESTART NOT FOUND\n\n");
vRecv.erase(vRecv.begin(), vRecv.end() - nHeaderSize);
}
break;
}
if (pstart - vRecv.begin() > 0)
printf("\n\nPROCESSMESSAGE SKIPPED %d BYTES\n\n", pstart - vRecv.begin());
vRecv.erase(vRecv.begin(), pstart);
// Read header
vector<char> vHeaderSave(vRecv.begin(), vRecv.begin() + nHeaderSize);
CMessageHeader hdr;
vRecv >> hdr;
if (!hdr.IsValid())
{
printf("\n\nPROCESSMESSAGE: ERRORS IN HEADER %s\n\n\n", hdr.GetCommand().c_str());
continue;
}
string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
if (nMessageSize > MAX_SIZE)
{
printf("ProcessMessages(%s, %u bytes) : nMessageSize > MAX_SIZE\n", strCommand.c_str(), nMessageSize);
continue;
}
if (nMessageSize > vRecv.size())
{
// Rewind and wait for rest of message
vRecv.insert(vRecv.begin(), vHeaderSave.begin(), vHeaderSave.end());
break;
}
// Checksum
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
unsigned int nChecksum = 0;
memcpy(&nChecksum, &hash, sizeof(nChecksum));
if (nChecksum != hdr.nChecksum)
{
printf("ProcessMessages(%s, %u bytes) : CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n",
strCommand.c_str(), nMessageSize, nChecksum, hdr.nChecksum);
continue;
}
// Copy message to its own buffer
CDataStream vMsg(vRecv.begin(), vRecv.begin() + nMessageSize, vRecv.nType, vRecv.nVersion);
vRecv.ignore(nMessageSize);
// Process message
bool fRet = false;
try
{
{
LOCK(cs_main);
fRet = ProcessMessage(pfrom, strCommand, vMsg);
}
if (fShutdown)
return true;
}
catch (std::ios_base::failure& e)
{
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from underlength message on vRecv
printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand.c_str(), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from overlong size
printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught\n", strCommand.c_str(), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(NULL, "ProcessMessages()");
}
if (!fRet)
printf("ProcessMessage(%s, %u bytes) FAILED\n", strCommand.c_str(), nMessageSize);
}
vRecv.Compact();
return true;
}
bool SendMessages(CNode* pto, bool fSendTrickle)
{
TRY_LOCK(cs_main, lockMain);
if (lockMain) {
// Don't send anything until we get their version message
if (pto->nVersion == 0)
return true;
// Keep-alive ping. We send a nonce of zero because we don't use it anywhere
// right now.
if (pto->nLastSend && GetTime() - pto->nLastSend > 30 * 60 && pto->vSend.empty()) {
uint64 nonce = 0;
if (pto->nVersion > BIP0031_VERSION)
pto->PushMessage("ping", nonce);
else
pto->PushMessage("ping");
}
// Resend wallet transactions that haven't gotten in a block yet
ResendWalletTransactions();
// Address refresh broadcast
static int64 nLastRebroadcast;
if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
{
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
{
// Periodically clear setAddrKnown to allow refresh broadcasts
if (nLastRebroadcast)
pnode->setAddrKnown.clear();
// Rebroadcast our address
if (!fNoListen)
{
CAddress addr = GetLocalAddress(&pnode->addr);
if (addr.IsRoutable())
pnode->PushAddress(addr);
}
}
}
nLastRebroadcast = GetTime();
}
//
// Message: addr
//
if (fSendTrickle)
{
vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
// returns true if wasn't already contained in the set
if (pto->setAddrKnown.insert(addr).second)
{
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
pto->PushMessage("addr", vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
pto->PushMessage("addr", vAddr);
}
//
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
LOCK(pto->cs_inventory);
vInv.reserve(pto->vInventoryToSend.size());
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (pto->setInventoryKnown.count(inv))
continue;
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt == 0)
hashSalt = GetRandHash();
uint256 hashRand = inv.hash ^ hashSalt;
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((hashRand & 3) != 0);
// always trickle our own transactions
if (!fTrickleWait)
{
CWalletTx wtx;
if (GetTransaction(inv.hash, wtx))
if (wtx.fFromMe)
fTrickleWait = true;
}
if (fTrickleWait)
{
vInvWait.push_back(inv);
continue;<|fim▁hole|> }
}
// returns true if wasn't already contained in the set
if (pto->setInventoryKnown.insert(inv).second)
{
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
pto->PushMessage("inv", vInv);
vInv.clear();
}
}
}
pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty())
pto->PushMessage("inv", vInv);
//
// Message: getdata
//
vector<CInv> vGetData;
int64 nNow = GetTime() * 1000000;
CTxDB txdb("r");
while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(txdb, inv))
{
if (fDebugNet)
printf("sending getdata: %s\n", inv.ToString().c_str());
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
pto->PushMessage("getdata", vGetData);
vGetData.clear();
}
mapAlreadyAskedFor[inv] = nNow;
}
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
pto->PushMessage("getdata", vGetData);
}
return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// BitcoinMiner
//
int static FormatHashBlocks(void* pbuffer, unsigned int len)
{
unsigned char* pdata = (unsigned char*)pbuffer;
unsigned int blocks = 1 + ((len + 8) / 64);
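    // Worked example: an 80-byte block header gives blocks = 1 + 88/64 = 2,
    // i.e. padding to 128 bytes: the data, a 0x80 byte, zeros, and the
    // big-endian bit count (80 * 8 = 640) in the last four bytes.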
unsigned char* pend = pdata + 64 * blocks;
memset(pdata + len, 0, 64 * blocks - len);
pdata[len] = 0x80;
unsigned int bits = len * 8;
pend[-1] = (bits >> 0) & 0xff;
pend[-2] = (bits >> 8) & 0xff;
pend[-3] = (bits >> 16) & 0xff;
pend[-4] = (bits >> 24) & 0xff;
return blocks;
}
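// The standard SHA-256 initial hash values from FIPS 180-4: the first 32 bits
// of the fractional parts of the square roots of the first eight primes.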
static const unsigned int pSHA256InitState[8] =
{0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
void SHA256Transform(void* pstate, void* pinput, const void* pinit)
{
SHA256_CTX ctx;
unsigned char data[64];
SHA256_Init(&ctx);
for (int i = 0; i < 16; i++)
((uint32_t*)data)[i] = ByteReverse(((uint32_t*)pinput)[i]);
for (int i = 0; i < 8; i++)
ctx.h[i] = ((uint32_t*)pinit)[i];
SHA256_Update(&ctx, data, sizeof(data));
for (int i = 0; i < 8; i++)
((uint32_t*)pstate)[i] = ctx.h[i];
}
//
// ScanHash scans nonces looking for a hash with at least some zero bits.
// It operates on big endian data. Caller does the byte reversing.
// All input buffers are 16-byte aligned. nNonce is usually preserved
// between calls, but periodically or if nNonce is 0xffff0000 or above,
// the block is rebuilt and nNonce starts over at zero.
//
unsigned int static ScanHash_CryptoPP(char* pmidstate, char* pdata, char* phash1, char* phash, unsigned int& nHashesDone)
{
unsigned int& nNonce = *(unsigned int*)(pdata + 12);
for (;;)
{
// Crypto++ SHA-256
// Hash pdata using pmidstate as the starting state into
// preformatted buffer phash1, then hash phash1 into phash
nNonce++;
SHA256Transform(phash1, pdata, pmidstate);
SHA256Transform(phash, phash1, pSHA256InitState);
// Return the nonce if the hash has at least some zero bits,
// caller will check if it has enough to reach the target
if (((unsigned short*)phash)[14] == 0)
return nNonce;
// If nothing found after trying for a while, return -1
if ((nNonce & 0xffff) == 0)
{
nHashesDone = 0xffff+1;
return (unsigned int) -1;
}
}
}
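// Note: ScanHash_CryptoPP is Bitcoin's double-SHA-256 search loop, retained
// from upstream; nothing in this file appears to call it, since the miner
// below searches scrypt hashes directly.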
// COrphan tracks a memory-pool transaction whose inputs spend outputs of other
// memory-pool transactions; it waits in mapDependers until everything in
// setDependsOn has been committed to the block being built.
class COrphan
{
public:
CTransaction* ptx;
set<uint256> setDependsOn;
double dPriority;
COrphan(CTransaction* ptxIn)
{
ptx = ptxIn;
dPriority = 0;
}
void print() const
{
printf("COrphan(hash=%s, dPriority=%.1f)\n", ptx->GetHash().ToString().substr(0,10).c_str(), dPriority);
BOOST_FOREACH(uint256 hash, setDependsOn)
printf(" setDependsOn %s\n", hash.ToString().substr(0,10).c_str());
}
};
uint64 nLastBlockTx = 0;
uint64 nLastBlockSize = 0;
CBlock* CreateNewBlock(CReserveKey& reservekey)
{
CBlockIndex* pindexPrev = pindexBest;
// Create new block
auto_ptr<CBlock> pblock(new CBlock());
if (!pblock.get())
return NULL;
// Create coinbase tx
CTransaction txNew;
txNew.vin.resize(1);
txNew.vin[0].prevout.SetNull();
txNew.vout.resize(1);
txNew.vout[0].scriptPubKey << reservekey.GetReservedKey() << OP_CHECKSIG;
// Add our coinbase tx as first transaction
pblock->vtx.push_back(txNew);
// Collect memory pool transactions into the block
int64 nFees = 0;
{
LOCK2(cs_main, mempool.cs);
CTxDB txdb("r");
// Priority order to process transactions
list<COrphan> vOrphan; // list memory doesn't move
map<uint256, vector<COrphan*> > mapDependers;
multimap<double, CTransaction*> mapPriority;
for (map<uint256, CTransaction>::iterator mi = mempool.mapTx.begin(); mi != mempool.mapTx.end(); ++mi)
{
CTransaction& tx = (*mi).second;
if (tx.IsCoinBase() || !tx.IsFinal())
continue;
COrphan* porphan = NULL;
double dPriority = 0;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
// Read prev transaction
CTransaction txPrev;
CTxIndex txindex;
if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))
{
// Has to wait for dependencies
if (!porphan)
{
// Use list for automatic deletion
vOrphan.push_back(COrphan(&tx));
porphan = &vOrphan.back();
}
mapDependers[txin.prevout.hash].push_back(porphan);
porphan->setDependsOn.insert(txin.prevout.hash);
continue;
}
int64 nValueIn = txPrev.vout[txin.prevout.n].nValue;
// Read block header
int nConf = txindex.GetDepthInMainChain();
dPriority += (double)nValueIn * nConf;
if (fDebug && GetBoolArg("-printpriority"))
printf("priority nValueIn=%-12"PRI64d" nConf=%-5d dPriority=%-20.1f\n", nValueIn, nConf, dPriority);
}
// Priority is sum(valuein * age) / txsize
dPriority /= ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
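            // Worked example (assuming COIN = 100,000,000 base units): a
            // 250-byte tx spending one 1-coin input with 10 confirmations has
            // dPriority = (100,000,000 * 10) / 250 = 4,000,000.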
if (porphan)
porphan->dPriority = dPriority;
else
mapPriority.insert(make_pair(-dPriority, &(*mi).second));
if (fDebug && GetBoolArg("-printpriority"))
{
printf("priority %-20.1f %s\n%s", dPriority, tx.GetHash().ToString().substr(0,10).c_str(), tx.ToString().c_str());
if (porphan)
porphan->print();
printf("\n");
}
}
// Collect transactions into block
map<uint256, CTxIndex> mapTestPool;
uint64 nBlockSize = 1000;
uint64 nBlockTx = 0;
int nBlockSigOps = 100;
while (!mapPriority.empty())
{
// Take highest priority transaction off priority queue
double dPriority = -(*mapPriority.begin()).first;
CTransaction& tx = *(*mapPriority.begin()).second;
mapPriority.erase(mapPriority.begin());
// Size limits
unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
if (nBlockSize + nTxSize >= MAX_BLOCK_SIZE_GEN)
continue;
// Legacy limits on sigOps:
unsigned int nTxSigOps = tx.GetLegacySigOpCount();
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
continue;
            // Transaction fee required depends on block size
            // clevelandcoind: Reduce the exempted free transactions to 500 bytes (from Bitcoin's 3000 bytes).
            // nBlockSize starts at 1000, so the 1500 threshold below leaves ~500 bytes of free-transaction room.
            bool fAllowFree = (nBlockSize + nTxSize < 1500 || CTransaction::AllowFree(dPriority));
int64 nMinFee = tx.GetMinFee(nBlockSize, fAllowFree, GMF_BLOCK);
// Connecting shouldn't fail due to dependency on other memory pool transactions
// because we're already processing them in order of dependency
map<uint256, CTxIndex> mapTestPoolTmp(mapTestPool);
MapPrevTx mapInputs;
bool fInvalid;
if (!tx.FetchInputs(txdb, mapTestPoolTmp, false, true, mapInputs, fInvalid))
continue;
int64 nTxFees = tx.GetValueIn(mapInputs)-tx.GetValueOut();
if (nTxFees < nMinFee)
continue;
nTxSigOps += tx.GetP2SHSigOpCount(mapInputs);
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
continue;
if (!tx.ConnectInputs(mapInputs, mapTestPoolTmp, CDiskTxPos(1,1,1), pindexPrev, false, true))
continue;
mapTestPoolTmp[tx.GetHash()] = CTxIndex(CDiskTxPos(1,1,1), tx.vout.size());
swap(mapTestPool, mapTestPoolTmp);
// Added
pblock->vtx.push_back(tx);
nBlockSize += nTxSize;
++nBlockTx;
nBlockSigOps += nTxSigOps;
nFees += nTxFees;
// Add transactions that depend on this one to the priority queue
uint256 hash = tx.GetHash();
if (mapDependers.count(hash))
{
BOOST_FOREACH(COrphan* porphan, mapDependers[hash])
{
if (!porphan->setDependsOn.empty())
{
porphan->setDependsOn.erase(hash);
if (porphan->setDependsOn.empty())
mapPriority.insert(make_pair(-porphan->dPriority, porphan->ptx));
}
}
}
}
nLastBlockTx = nBlockTx;
nLastBlockSize = nBlockSize;
printf("CreateNewBlock(): total size %lu\n", nBlockSize);
}
pblock->vtx[0].vout[0].nValue = GetBlockValue(pindexPrev->nHeight+1, nFees);
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
pblock->UpdateTime(pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock.get());
pblock->nNonce = 0;
return pblock.release();
}
void IncrementExtraNonce(CBlock* pblock, CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
// Update nExtraNonce
static uint256 hashPrevBlock;
if (hashPrevBlock != pblock->hashPrevBlock)
{
nExtraNonce = 0;
hashPrevBlock = pblock->hashPrevBlock;
}
++nExtraNonce;
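    // Folding nTime and the extra nonce into the coinbase scriptSig changes
    // the coinbase txid and hence the merkle root, handing the miner a fresh
    // 2^32 header-nonce search space over the same transaction set.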
pblock->vtx[0].vin[0].scriptSig = (CScript() << pblock->nTime << CBigNum(nExtraNonce)) + COINBASE_FLAGS;
assert(pblock->vtx[0].vin[0].scriptSig.size() <= 100);
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
}
void FormatHashBuffers(CBlock* pblock, char* pmidstate, char* pdata, char* phash1)
{
//
// Prebuild hash buffers
//
struct
{
struct unnamed2
{
int nVersion;
uint256 hashPrevBlock;
uint256 hashMerkleRoot;
unsigned int nTime;
unsigned int nBits;
unsigned int nNonce;
}
block;
unsigned char pchPadding0[64];
uint256 hash1;
unsigned char pchPadding1[64];
}
tmp;
memset(&tmp, 0, sizeof(tmp));
tmp.block.nVersion = pblock->nVersion;
tmp.block.hashPrevBlock = pblock->hashPrevBlock;
tmp.block.hashMerkleRoot = pblock->hashMerkleRoot;
tmp.block.nTime = pblock->nTime;
tmp.block.nBits = pblock->nBits;
tmp.block.nNonce = pblock->nNonce;
FormatHashBlocks(&tmp.block, sizeof(tmp.block));
FormatHashBlocks(&tmp.hash1, sizeof(tmp.hash1));
// Byte swap all the input buffer
for (unsigned int i = 0; i < sizeof(tmp)/4; i++)
((unsigned int*)&tmp)[i] = ByteReverse(((unsigned int*)&tmp)[i]);
// Precalc the first half of the first hash, which stays constant
SHA256Transform(pmidstate, &tmp.block, pSHA256InitState);
memcpy(pdata, &tmp.block, 128);
memcpy(phash1, &tmp.hash1, 64);
}
bool CheckWork(CBlock* pblock, CWallet& wallet, CReserveKey& reservekey)
{
uint256 hash = pblock->GetPoWHash();
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
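    // The proof-of-work hash (GetPoWHash, scrypt for this chain to judge by
    // the miner loop below) is what must meet the target; GetHash() used
    // further down is the double-SHA256 block id for indexing and relay.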
if (hash > hashTarget)
return false;
//// debug print
printf("BitcoinMiner:\n");
printf("proof-of-work found \n hash: %s \ntarget: %s\n", hash.GetHex().c_str(), hashTarget.GetHex().c_str());
pblock->print();
printf("generated %s\n", FormatMoney(pblock->vtx[0].vout[0].nValue).c_str());
// Found a solution
{
LOCK(cs_main);
if (pblock->hashPrevBlock != hashBestChain)
return error("BitcoinMiner : generated block is stale");
// Remove key from key pool
reservekey.KeepKey();
// Track how many getdata requests this block gets
{
LOCK(wallet.cs_wallet);
wallet.mapRequestCount[pblock->GetHash()] = 0;
}
// Process this block the same as if we had received it from another node
if (!ProcessBlock(NULL, pblock))
return error("BitcoinMiner : ProcessBlock, block not accepted");
}
return true;
}
void static ThreadBitcoinMiner(void* parg);
static bool fGenerateBitcoins = false;
static bool fLimitProcessors = false;
static int nLimitProcessors = -1;
void static BitcoinMiner(CWallet *pwallet)
{
printf("BitcoinMiner started\n");
SetThreadPriority(THREAD_PRIORITY_LOWEST);
// Make this thread recognisable as the mining thread
RenameThread("bitcoin-miner");
// Each thread has its own key and counter
CReserveKey reservekey(pwallet);
unsigned int nExtraNonce = 0;
while (fGenerateBitcoins)
{
if (fShutdown)
return;
while (vNodes.empty() || IsInitialBlockDownload())
{
Sleep(1000);
if (fShutdown)
return;
if (!fGenerateBitcoins)
return;
}
//
// Create new block
//
unsigned int nTransactionsUpdatedLast = nTransactionsUpdated;
CBlockIndex* pindexPrev = pindexBest;
auto_ptr<CBlock> pblock(CreateNewBlock(reservekey));
if (!pblock.get())
return;
IncrementExtraNonce(pblock.get(), pindexPrev, nExtraNonce);
printf("Running BitcoinMiner with %d transactions in block\n", pblock->vtx.size());
//
// Prebuild hash buffers
//
char pmidstatebuf[32+16]; char* pmidstate = alignup<16>(pmidstatebuf);
char pdatabuf[128+16]; char* pdata = alignup<16>(pdatabuf);
char phash1buf[64+16]; char* phash1 = alignup<16>(phash1buf);
FormatHashBuffers(pblock.get(), pmidstate, pdata, phash1);
unsigned int& nBlockTime = *(unsigned int*)(pdata + 64 + 4);
unsigned int& nBlockBits = *(unsigned int*)(pdata + 64 + 8);
//unsigned int& nBlockNonce = *(unsigned int*)(pdata + 64 + 12);
//
// Search
//
int64 nStart = GetTime();
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
loop
{
unsigned int nHashesDone = 0;
//unsigned int nNonceFound;
uint256 thash;
char scratchpad[SCRYPT_SCRATCHPAD_SIZE];
loop
{
scrypt_1024_1_1_256_sp(BEGIN(pblock->nVersion), BEGIN(thash), scratchpad);
if (thash <= hashTarget)
{
// Found a solution
SetThreadPriority(THREAD_PRIORITY_NORMAL);
CheckWork(pblock.get(), *pwalletMain, reservekey);
SetThreadPriority(THREAD_PRIORITY_LOWEST);
break;
}
pblock->nNonce += 1;
nHashesDone += 1;
if ((pblock->nNonce & 0xFF) == 0)
break;
}
// Meter hashes/sec
static int64 nHashCounter;
if (nHPSTimerStart == 0)
{
nHPSTimerStart = GetTimeMillis();
nHashCounter = 0;
}
else
nHashCounter += nHashesDone;
if (GetTimeMillis() - nHPSTimerStart > 4000)
{
static CCriticalSection cs;
{
LOCK(cs);
if (GetTimeMillis() - nHPSTimerStart > 4000)
{
dHashesPerSec = 1000.0 * nHashCounter / (GetTimeMillis() - nHPSTimerStart);
nHPSTimerStart = GetTimeMillis();
nHashCounter = 0;
string strStatus = strprintf(" %.0f khash/s", dHashesPerSec/1000.0);
static int64 nLogTime;
if (GetTime() - nLogTime > 30 * 60)
{
nLogTime = GetTime();
printf("%s ", DateTimeStrFormat("%x %H:%M", GetTime()).c_str());
printf("hashmeter %3d CPUs %6.0f khash/s\n", vnThreadsRunning[THREAD_MINER], dHashesPerSec/1000.0);
}
}
}
}
// Check for stop or if block needs to be rebuilt
if (fShutdown)
return;
if (!fGenerateBitcoins)
return;
if (fLimitProcessors && vnThreadsRunning[THREAD_MINER] > nLimitProcessors)
return;
if (vNodes.empty())
break;
if (pblock->nNonce >= 0xffff0000)
break;
if (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)
break;
if (pindexPrev != pindexBest)
break;
// Update nTime every few seconds
pblock->UpdateTime(pindexPrev);
nBlockTime = ByteReverse(pblock->nTime);
if (fTestNet)
{
// Changing pblock->nTime can change work required on testnet:
nBlockBits = ByteReverse(pblock->nBits);
hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
}
}
}
}
void static ThreadBitcoinMiner(void* parg)
{
CWallet* pwallet = (CWallet*)parg;
try
{
vnThreadsRunning[THREAD_MINER]++;
BitcoinMiner(pwallet);
vnThreadsRunning[THREAD_MINER]--;
}
catch (std::exception& e) {
vnThreadsRunning[THREAD_MINER]--;
PrintException(&e, "ThreadBitcoinMiner()");
} catch (...) {
vnThreadsRunning[THREAD_MINER]--;
PrintException(NULL, "ThreadBitcoinMiner()");
}
nHPSTimerStart = 0;
if (vnThreadsRunning[THREAD_MINER] == 0)
dHashesPerSec = 0;
printf("ThreadBitcoinMiner exiting, %d threads remaining\n", vnThreadsRunning[THREAD_MINER]);
}
void GenerateBitcoins(bool fGenerate, CWallet* pwallet)
{
fGenerateBitcoins = fGenerate;
nLimitProcessors = GetArg("-genproclimit", -1);
if (nLimitProcessors == 0)
fGenerateBitcoins = false;
fLimitProcessors = (nLimitProcessors != -1);
if (fGenerate)
{
int nProcessors = boost::thread::hardware_concurrency();
printf("%d processors\n", nProcessors);
if (nProcessors < 1)
nProcessors = 1;
if (fLimitProcessors && nProcessors > nLimitProcessors)
nProcessors = nLimitProcessors;
int nAddThreads = nProcessors - vnThreadsRunning[THREAD_MINER];
printf("Starting %d BitcoinMiner threads\n", nAddThreads);
for (int i = 0; i < nAddThreads; i++)
{
if (!CreateThread(ThreadBitcoinMiner, pwallet))
printf("Error: CreateThread(ThreadBitcoinMiner) failed\n");
Sleep(10);
}
}
}<|fim▁end|> | |
<|file_name|>Helper.cpp<|end_file_name|><|fim▁begin|>#include "stdafx.h"
#include "Helper.h"
#include "common.h"
Helper::Helper(void)
{
x = rand() % wide;
y = 0;
type = rand() % 5;
exist = true;
}
Helper::~Helper(void)
{
}
// 'wide' and 'height' are the play-field dimensions (presumably defined in common.h)
void Helper::get(Player* p)
{
y += 2;
	if (y >= height)// check whether the pickup has fallen off the bottom of the screen
{
y = height-1;
exist = false;
}
	if(exist && x >= p->x-p->size && x <= p->x + p->size && y >= p->y-p->size && y <= p->y+p->size)// bounding-box test: the pickup hit the player
{
if(type == 0){
p->BombState = 3;
}
else if(type == 1){<|fim▁hole|> else if(type == 2){
p->BombState = 2;
//p->PlaneState = 0;
}
else if(type == 3){
p->hp ++;
}
else if(type == 4){
if(p->PlaneState <= 1)
p->PlaneState ++;
p->size = 40;
}
exist = false;
}
}
// Draw the pickup sprite by type: 0=Laser, 1=Bomb, 2=General, 3=Hp, 4=Protect
void Helper::update(Player* p)
{
	get(p); // apply the pickup's effect to the player first
if(!exist) return;
if(type == 0){
SelectObject(bufdc ,Laser);
TransparentBltU(mdc , x, y ,20 , 20 , bufdc , 0, 0, 20, 20, RGB(255,255,255));
//BitBlt(mdc , x, y , 10 , 10 ,bufdc , 0 , 0 ,SRCCOPY);
}
else if(type == 1){
SelectObject(bufdc ,Bomb);
TransparentBltU(mdc , x, y ,20 , 20 , bufdc , 0, 0, 20, 20, RGB(255,255,255));
//BitBlt(mdc , x, y , 10 , 10 ,bufdc , 0 , 0 ,SRCCOPY);
}
else if(type == 2){
SelectObject(bufdc ,General);
TransparentBltU(mdc , x, y ,20 , 20 , bufdc , 0, 0, 20, 20, RGB(255,255,255));
//BitBlt(mdc , x, y , 10 , 10 ,bufdc , 0 , 0 ,SRCCOPY);
}
else if(type == 3){
SelectObject(bufdc ,Hp);
TransparentBltU(mdc , x, y ,20 , 20 , bufdc , 0, 0, 20, 20, RGB(255,255,255));
//BitBlt(mdc , x, y , 10 , 10 ,bufdc , 0 , 0 ,SRCCOPY);
}
else if(type == 4){
SelectObject(bufdc ,Protect);
TransparentBltU(mdc , x, y ,20 , 20 , bufdc , 0, 0, 20, 20, RGB(255,255,255));
//BitBlt(mdc , x, y , 10 , 10 ,bufdc , 0 , 0 ,SRCCOPY);
}
}<|fim▁end|> | p->bombnum ++;
} |
<|file_name|>express.js<|end_file_name|><|fim▁begin|>let express = require('express');
let bodyParser = require('body-parser');
let path = require('path');<|fim▁hole|>module.exports = function() {
let app = express();
app.use(bodyParser.urlencoded({
extended: true
}));
app.use(bodyParser.json());
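    // Middleware order matters here: requests under /gallery are first checked
    // against the static 'public' directory; if express.static finds no match
    // it falls through, and the exact GET /gallery route below serves the page.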
app.use('/gallery', express.static('public'));
app.get('/gallery', function(req,res) {
        res.sendFile(path.join(__dirname, '..', 'index.html'));
});
return app;
};<|fim▁end|> | |
<|file_name|>airflow.py<|end_file_name|><|fim▁begin|>from shutit_module import ShutItModule
import base64
class openshift_airflow(ShutItModule):
def build(self, shutit):
shutit.send('cd /tmp/openshift_vm')
shutit.login(command='vagrant ssh')
		shutit.login(command='sudo su -',password='vagrant',note='Become root (there is a problem logging in as admin with the vagrant user)')
# AIRFLOW BUILD<|fim▁hole|>
# IMAGE STREAM
shutit.send_file('/tmp/imagestream.json','''
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
}''')
shutit.send('oc create -f /tmp/imagestream.json')
# BUILD CONFIG
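		# Note: values under a Kubernetes Secret's 'data' key must be
		# base64-encoded, hence the b64encode() call below ('stringData'
		# would accept plain text instead).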
shutit.send_file('secret.json','''{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mysecret"
},
"namespace": "user2",
"data": {
"username": "''' + base64.b64encode('myusername') + '''"
}
}''')
shutit.send('oc create -f secret.json')
shutit.send_file('/tmp/buildconfig.json','''
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow",
"labels": {
"name": "airflow-build"
}
},
"spec": {
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/ianmiell/shutit-airflow"
}
},
"strategy": {
"type": "Docker"
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
},
"volumes": {
"name": "secvol",
"secret": {
"secretname": "mysecret"
}
}
}
}
''')
shutit.send('oc create -f /tmp/buildconfig.json')
# DEPLOYMENT CONFIG
shutit.send_file('/tmp/deploymentconfig.json','''
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {
"strategy": {
"type": "Rolling",
"rollingParams": {
"updatePeriodSeconds": 1,
"intervalSeconds": 1,
"timeoutSeconds": 120
},
"resources": {}
},
"triggers": [
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"nodejs-helloworld"
],
"from": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
}
},
{
"type": "ConfigChange"
}
],
"replicas": 1,
"selector": {
"name":"airflow"
},
"template": {
"metadata": {
"labels": {
"name": "airflow"
}
},
"spec": {
"containers": [
{
"name": "airflow",
"image": "airflow",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"securityContext": {
"capabilities": {},
"privileged": false
}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
},
"status": {}
}
''')
shutit.send('oc create -f /tmp/deploymentconfig.json')
shutit.logout()
shutit.logout()
return True
def module():
return openshift_airflow(
'shutit.openshift_vm.openshift_vm.openshift_airflow', 1418326706.005,
description='',
maintainer='',
delivery_methods=['bash'],
depends=['shutit.openshift_vm.openshift_vm.openshift_vm']
)<|fim▁end|> | # Takes too long.
#shutit.send('oc describe buildconfig airflow',note='Ideally you would take this github url, and update your github webhooks for this project. But there is no public URL for this server so we will skip and trigger a build manually.')
#shutit.send('oc start-build airflow',note='Trigger a build by hand')
#shutit.send('sleep 60 && oc logs -f build/airflow-1',note='Follow the build and wait for it to terminate') |
<|file_name|>mqtt.py<|end_file_name|><|fim▁begin|># This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more informations
# Copyright (C) Santiago Hernandez Ramos <[email protected]>
# This program is published under GPLv2 license
from scapy.packet import Packet, bind_layers
from scapy.fields import FieldLenField, BitEnumField, StrLenField, \
ShortField, ConditionalField, ByteEnumField, ByteField, StrNullField
from scapy.layers.inet import TCP
from scapy.error import Scapy_Exception
# CUSTOM FIELDS
# source: http://stackoverflow.com/a/43717630
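# MQTT's "remaining length" field is a base-128 varint: each byte carries
# 7 bits of the value (least-significant group first) and the high bit is a
# continuation flag. Worked example: 321 = 65 + 2*128 encodes as the two
# bytes 0xC1 0x02 (65 with the continuation bit set, then 2).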
class VariableFieldLenField(FieldLenField):
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
data = []
        # A zero remaining-length is legal in MQTT (e.g. PINGREQ); encode it
        # as a single 0x00 byte.
        if not val:
            return s + chr(0)
        while val:
            if val > 127:
                data.append(val & 127)
                val //= 128  # base-128 groups, per the MQTT spec
            else:
                data.append(val)
                lastoffset = len(data) - 1
                data = "".join(chr(val | (0 if i == lastoffset else 128))
                               for i, val in enumerate(data))
                return s + data
        if len(data) > 3:
            raise Scapy_Exception("%s: malformed length field" %
                                  self.__class__.__name__)
def getfield(self, pkt, s):
value = 0
for offset, curbyte in enumerate(s):
curbyte = ord(curbyte)
value += (curbyte & 127) * (128 ** offset)
if curbyte & 128 == 0:
return s[offset + 1:], value
if offset > 2:
raise Scapy_Exception("%s: malformed length field" %
self.__class__.__name__)
# LAYERS
CONTROL_PACKET_TYPE = {1: 'CONNECT',
2: 'CONNACK',
3: 'PUBLISH',
4: 'PUBACK',
5: 'PUBREC',
6: 'PUBREL',
7: 'PUBCOMP',
8: 'SUBSCRIBE',
9: 'SUBACK',
10: 'UNSUBSCRIBE',
11: 'UNSUBACK',
12: 'PINGREQ',
13: 'PINGRESP',
14: 'DISCONNECT'}
QOS_LEVEL = {0: 'At most once delivery',
1: 'At least once delivery',
2: 'Exactly once delivery'}
# source: http://stackoverflow.com/a/43722441
class MQTT(Packet):
name = "MQTT fixed header"
fields_desc = [
BitEnumField("type", 1, 4, CONTROL_PACKET_TYPE),
BitEnumField("DUP", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("QOS", 0, 2, QOS_LEVEL),
BitEnumField("RETAIN", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
# Since the size of the len field depends on the next layer, we need
# to "cheat" with the length_of parameter and use adjust parameter to
# calculate the value.<|fim▁hole|>
class MQTTConnect(Packet):
name = "MQTT connect"
fields_desc = [
FieldLenField("length", None, length_of="protoname"),
StrLenField("protoname", "",
length_from=lambda pkt: pkt.length),
ByteField("protolevel", 0),
BitEnumField("usernameflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("passwordflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willretainflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willQOSflag", 0, 2, QOS_LEVEL),
BitEnumField("willflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("cleansess", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("reserved", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
ShortField("klive", 0),
FieldLenField("clientIdlen", None, length_of="clientId"),
StrLenField("clientId", "",
length_from=lambda pkt: pkt.clientIdlen),
# Payload with optional fields depending on the flags
ConditionalField(FieldLenField("wtoplen", None, length_of="willtopic"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willtopic", "",
length_from=lambda pkt: pkt.wtoplen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("wmsglen", None, length_of="willmsg"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willmsg", "",
length_from=lambda pkt: pkt.wmsglen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("userlen", None, length_of="username"),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(StrLenField("username", "",
length_from=lambda pkt: pkt.userlen),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(FieldLenField("passlen", None, length_of="password"),
lambda pkt: pkt.passwordflag == 1),
ConditionalField(StrLenField("password", "",
length_from=lambda pkt: pkt.passlen),
lambda pkt: pkt.passwordflag == 1),
]
RETURN_CODE = {0: 'Connection Accepted',
1: 'Unacceptable protocol version',
2: 'Identifier rejected',
3: 'Server unavailable',
4: 'Bad username/password',
5: 'Not authorized'}
class MQTTConnack(Packet):
name = "MQTT connack"
fields_desc = [
ByteField("sessPresentFlag", 0),
ByteEnumField("retcode", 0, RETURN_CODE),
# this package has not payload
]
class MQTTPublish(Packet):
name = "MQTT publish"
fields_desc = [
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ConditionalField(ShortField("msgid", None),
lambda pkt: (pkt.underlayer.QOS == 1
or pkt.underlayer.QOS == 2)),
StrLenField("value", "",
length_from=lambda pkt: (pkt.underlayer.len -
pkt.length - 2)),
]
class MQTTPuback(Packet):
name = "MQTT puback"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrec(Packet):
name = "MQTT pubrec"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrel(Packet):
name = "MQTT pubrel"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubcomp(Packet):
name = "MQTT pubcomp"
fields_desc = [
ShortField("msgid", None),
]
class MQTTSubscribe(Packet):
name = "MQTT subscribe"
fields_desc = [
ShortField("msgid", None),
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ByteEnumField("QOS", 0, QOS_LEVEL),
]
ALLOWED_RETURN_CODE = {0: 'Success',
1: 'Success',
2: 'Success',
128: 'Failure'}
class MQTTSuback(Packet):
name = "MQTT suback"
fields_desc = [
ShortField("msgid", None),
ByteEnumField("retcode", None, ALLOWED_RETURN_CODE)
]
class MQTTUnsubscribe(Packet):
name = "MQTT unsubscribe"
fields_desc = [
ShortField("msgid", None),
StrNullField("payload", "")
]
class MQTTUnsuback(Packet):
name = "MQTT unsuback"
fields_desc = [
ShortField("msgid", None)
]
# LAYERS BINDINGS
bind_layers(TCP, MQTT, sport=1883)
bind_layers(TCP, MQTT, dport=1883)
bind_layers(MQTT, MQTTConnect, type=1)
bind_layers(MQTT, MQTTConnack, type=2)
bind_layers(MQTT, MQTTPublish, type=3)
bind_layers(MQTT, MQTTPuback, type=4)
bind_layers(MQTT, MQTTPubrec, type=5)
bind_layers(MQTT, MQTTPubrel, type=6)
bind_layers(MQTT, MQTTPubcomp, type=7)
bind_layers(MQTT, MQTTSubscribe, type=8)
bind_layers(MQTT, MQTTSuback, type=9)
bind_layers(MQTT, MQTTUnsubscribe, type=10)
bind_layers(MQTT, MQTTUnsuback, type=11)
bind_layers(MQTTConnect, MQTT)
bind_layers(MQTTConnack, MQTT)
bind_layers(MQTTPublish, MQTT)
bind_layers(MQTTPuback, MQTT)
bind_layers(MQTTPubrec, MQTT)
bind_layers(MQTTPubrel, MQTT)
bind_layers(MQTTPubcomp, MQTT)
bind_layers(MQTTSubscribe, MQTT)
bind_layers(MQTTSuback, MQTT)
bind_layers(MQTTUnsubscribe, MQTT)
bind_layers(MQTTUnsuback, MQTT)<|fim▁end|> | VariableFieldLenField("len", None, length_of="len",
adjust=lambda pkt, x: len(pkt.payload),),
]
|
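A quick usage sketch for the MQTT layers in the row above, assuming the classes are loaded into a scapy session and that the MQTT fixed-header class (not shown here) defines the `type`, `QOS` and `len` fields the bindings and dissectors rely on:

from scapy.all import raw

# Build a QoS-1 PUBLISH; msgid is only emitted because the underlayer QOS is 1.
pkt = MQTT(type=3, QOS=1) / MQTTPublish(topic=b"sensors/temp", msgid=7, value=b"21.5")
wire = raw(pkt)              # length fields are computed on build
print(MQTT(wire).summary())  # re-dissect to confirm the bind_layers() routing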
<|file_name|>securitygroups.go<|end_file_name|><|fim▁begin|>package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// SecurityGroupsClient is the network Client
type SecurityGroupsClient struct {
BaseClient
}
// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient client.
func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient {
return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewSecurityGroupsClientWithBaseURI creates an instance of the SecurityGroupsClient client using a custom endpoint.
// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient {
return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a network security group in the specified resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkSecurityGroupName - the name of the network security group.
// parameters - parameters supplied to the create or update network security group operation.
func (client SecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (result SecurityGroupsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client SecurityGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (future SecurityGroupsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent {
sg, err = client.CreateOrUpdateResponder(sg.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified network security group.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkSecurityGroupName - the name of the network security group.
func (client SecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (result SecurityGroupsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.Delete")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, networkSecurityGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client SecurityGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future SecurityGroupsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client SecurityGroupsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the specified network security group.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkSecurityGroupName - the name of the network security group.
// expand - expands referenced resources.
func (client SecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, networkSecurityGroupName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure responding to request")
return
}
<|fim▁hole|>
// GetPreparer prepares the Get request.
func (client SecurityGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets all network security groups in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client SecurityGroupsClient) List(ctx context.Context, resourceGroupName string) (result SecurityGroupListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.List")
defer func() {
sc := -1
if result.sglr.Response.Response != nil {
sc = result.sglr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.sglr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request")
return
}
result.sglr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to request")
return
}
if result.sglr.hasNextLink() && result.sglr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListPreparer prepares the List request.
func (client SecurityGroupsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client SecurityGroupsClient) listNextResults(ctx context.Context, lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) {
req, err := lastResults.securityGroupListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client SecurityGroupsClient) ListComplete(ctx context.Context, resourceGroupName string) (result SecurityGroupListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName)
return
}
// ListAll gets all network security groups in a subscription.
func (client SecurityGroupsClient) ListAll(ctx context.Context) (result SecurityGroupListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.ListAll")
defer func() {
sc := -1
if result.sglr.Response.Response != nil {
sc = result.sglr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listAllNextResults
req, err := client.ListAllPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request")
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.sglr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request")
return
}
result.sglr, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to request")
return
}
if result.sglr.hasNextLink() && result.sglr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListAllPreparer prepares the ListAll request.
func (client SecurityGroupsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listAllNextResults retrieves the next set of results, if any.
func (client SecurityGroupsClient) listAllNextResults(ctx context.Context, lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) {
req, err := lastResults.securityGroupListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", resp, "Failure responding to next results request")
}
return
}
// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
func (client SecurityGroupsClient) ListAllComplete(ctx context.Context) (result SecurityGroupListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.ListAll")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListAll(ctx)
return
}
// UpdateTags updates a network security group tags.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkSecurityGroupName - the name of the network security group.
// parameters - parameters supplied to update network security group tags.
func (client SecurityGroupsClient) UpdateTags(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters TagsObject) (result SecurityGroupsUpdateTagsFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.UpdateTags")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "UpdateTags", nil, "Failure preparing request")
return
}
result, err = client.UpdateTagsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "UpdateTags", nil, "Failure sending request")
return
}
return
}
// UpdateTagsPreparer prepares the UpdateTags request.
func (client SecurityGroupsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters TagsObject) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
func (client SecurityGroupsClient) UpdateTagsSender(req *http.Request) (future SecurityGroupsUpdateTagsFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client SecurityGroupsClient) (sg SecurityGroup, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsUpdateTagsFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent {
sg, err = client.UpdateTagsResponder(sg.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.SecurityGroupsUpdateTagsFuture", "Result", sg.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
func (client SecurityGroupsClient) UpdateTagsResponder(resp *http.Response) (result SecurityGroup, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}<|fim▁end|> | return
} |
<|file_name|>GalleryRoute.ts<|end_file_name|><|fim▁begin|>'use strict';
import express = require('express');
const router: express.Router = express.Router();
<|fim▁hole|>
router.get('/:limit', service.getAll);
router.get('/main/:limit', service.getAll);
router.get('/main/random/:limit', service.getRandomGallery);
router.get('/main/popular/:limit', service.getPopularGallery);
router.get('/category/:id/:itemid', service.getGalleries);
router.get('/single/random', service.getRandomCollection);
router.get('/single/:id/:catid/:itemid', service.getCollection);
module.exports = router;<|fim▁end|> | const service = require('../../services').GalleryService; |
<|file_name|>part1.rs<|end_file_name|><|fim▁begin|>// adventofcode - day 9
// part 1
use std::io::prelude::*;
use std::fs::File;
use std::collections::HashSet;
struct Graph {
nodes: Option<Vec<String>>,
matrix: Option<Vec<Vec<i32>>>,
}
impl Graph {
fn new(names: HashSet<&str>) -> Graph {
let mut graph = Graph{nodes: None, matrix: None};
let size = names.len();
graph.nodes = Some(Vec::with_capacity(size));
match graph.nodes {
Some(ref mut nodes) => {
for name in names {
nodes.push(name.to_string());
}
nodes.sort();
},
None => {
panic!("Failed to create graph!");
}
}
graph.matrix = Some(Vec::<Vec<i32>>::with_capacity(size));
match graph.matrix {
Some(ref mut matrix) => {
for ii in 0..size {
matrix.push(Vec::<i32>::with_capacity(size));
for _ in 0..size {
matrix[ii].push(0);
}
}
},
None => {
panic!("Failed to create graph!");
}
}
graph
}
fn size(&self) -> usize {
match self.nodes {
Some(ref nodes) => nodes.len(),
None => 0,
}
}
#[allow(dead_code)]
fn get_node_names(&self) -> Vec<String> {
match self.nodes {
Some(ref nodes) => nodes.clone(),
None => Vec::<String>::new(),
}
}
fn insert_edge(&mut self, src: &String, dest: &String, length: i32) {
let src_idx = match self.nodes {
Some(ref nodes) => match nodes.binary_search(src){
Ok(x) => x,
Err(e) => {
println!("Error: {}", e);
return;
},
},
None => return,
};
let dst_idx = match self.nodes {
Some(ref nodes) => match nodes.binary_search(dest){
Ok(x) => x,
Err(e) => {
println!("Error: {}", e);
return;
},
},
None => return,
};
match self.matrix {
Some(ref mut matrix) => {
matrix[src_idx][dst_idx] = length;
matrix[dst_idx][src_idx] = length;
},
None => return,
}<|fim▁hole|> fn calculate_path(&self, visited: Vec<usize>) -> (i32, Vec<usize>){
if visited.len() == self.size() {
return (self.calculate_cost_of_path(&visited), visited);
}
let mut min_cost = std::i32::MAX;
let mut min_path = Vec::new();
for ii in 0..self.size(){
if ! visited.contains(&ii){
let mut path = visited.clone();
path.push(ii);
let (cost, path) = self.calculate_path(path);
if cost < min_cost {
min_cost = cost;
min_path = path;
}
}
}
(min_cost, min_path)
}
fn calculate_cost_of_path(&self, path: &Vec<usize>) -> i32 {
let mut locations = path.iter();
let mut from = locations.next().unwrap();
let mut cost = 0i32;
loop {
match locations.next() {
Some(to) => {
cost += self.get_edge_cost(*from, *to);
from = to;
},
None => return cost,
}
}
}
fn get_edge_cost(&self, from: usize, to: usize) -> i32 {
match self.matrix {
Some(ref matrix) => matrix[from][to],
None => 0,
}
}
}
fn main(){
println!("Advent of Code - day 9 | part 1");
// import data
let data = import_data();
let graph = match parse_data(data){
Some(x) => x,
None => panic!("Couldn\'t parse data!"),
};
//println!("Graph has the following nodes ({}):", graph.size());
//for name in graph.get_node_names() {
// println!("{}", name);
//}
let path = Vec::new();
let (cost, path) = graph.calculate_path(path);
println!("Shortest path costs: {}", cost);
for location in path {
println!("{}", location);
}
}
fn parse_data(data: String) -> Option<Graph> {
let mut all_names = HashSet::new();
// first: scan data for names
for line in data.lines(){
let names = line.split(" to ").flat_map(|s| s.split(" = ")).take(2);
for name in names {
all_names.insert(name);
}
}
let mut graph = Graph::new(all_names);
for line in data.lines(){
let info = line.split(" to ")
.flat_map(|s| s.split(" = "))
.map(|s| s.parse::<String>().unwrap())
.collect::<Vec<String>>();
let length = info[2].parse::<i32>().unwrap();
graph.insert_edge(&info[0], &info[1], length);
}
Some(graph)
}
// This function simply imports the data set from a file called input.txt
fn import_data() -> String {
let mut file = match File::open("../../inputs/09.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
// remove trailing \n
data.pop();
data
}<|fim▁end|> | }
// bruteforce solution |
<|file_name|>filter.py<|end_file_name|><|fim▁begin|># Filename: filter.py
"""
LendingClub2 Filter Module
"""
# Standard libraries
import collections
from abc import abstractmethod
from abc import ABC
# lendingclub2
from lendingclub2.error import LCError
# pylint: disable=too-few-public-methods
class BorrowerTrait(ABC):
"""
Abstract base class to define borrowers of interest
"""
@abstractmethod
def matches(self, borrower):
"""
Check if borrower has the trait
:param borrower: instance of :py:class:`~lendingclub2.loan.Borrower`.
:returns: boolean
"""
return True
class BorrowerEmployedTrait(BorrowerTrait):
"""
Check if borrower is employed
"""
def matches(self, borrower):
"""
Check if borrower has the trait
:param borrower: instance of :py:class:`~lendingclub2.loan.Borrower`.
:returns: boolean
"""
return borrower.employed
class Filter(ABC):
"""
Abstract base class for filtering the loan
"""
@abstractmethod
def meet_requirement(self, loan):
"""
Check if the loan is meeting the filter requirement
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
return True
class FilterByApproved(Filter):
"""
Filter by if the loan is already approved
"""
def meet_requirement(self, loan):
"""
Check if the loan is meeting the filter requirement
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
return loan.approved
class FilterByBorrowerTraits(Filter):
"""
Filter to have borrower matching specific traits
"""
# pylint: disable=super-init-not-called
def __init__(self, traits):
"""
Constructor
:param traits: instance of
:py:class:`~lendingclub2.filter.BorrowerTrait`<|fim▁hole|> or iterable of instance of
:py:class:`~lendingclub2.filter.BorrowerTrait`.
"""
if isinstance(traits, collections.abc.Iterable):
self._specs = traits
elif isinstance(traits, BorrowerTrait):
self._specs = (traits, )
else:
fstr = "invalid traits type for {}".format(self.__class__.__name__)
raise LCError(fstr)
# pylint: enable=super-init-not-called
def meet_requirement(self, loan):
"""
Check if the loan is meeting the filter requirement
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
for spec in self._specs:
if not spec.matches(loan.borrower):
return False
return True
class FilterByFunded(Filter):
"""
Filter by percentage funded
"""
# pylint: disable=super-init-not-called
def __init__(self, percentage):
"""
Constructor.
:param percentage: float (between 0 and 100 inclusive)
"""
if percentage < 0.0 or percentage > 100.0:
fstr = "percentage needs to be between 0 and 100 (inclusive)"
raise LCError(fstr)
self._percentage = percentage
# pylint: enable=super-init-not-called
def meet_requirement(self, loan):
"""
The loan would have to be at least the percentage value to meet the
requirement.
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
return loan.percent_funded >= self._percentage
class FilterByGrade(Filter):
"""
Filter by grade
"""
# pylint: disable=super-init-not-called
def __init__(self, grades=None):
"""
Constructor
:param grades: iterable of string (default: None, example: ('A', 'B'))
"""
self._grades = grades
# pylint: enable=super-init-not-called
def meet_requirement(self, loan):
"""
Check if the loan is meeting the filter requirement
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
if self._grades and loan.grade in self._grades:
return True
return False
class FilterByTerm(Filter):
"""
Filter by term
"""
# pylint: disable=super-init-not-called
def __init__(self, value=36, min_val=None, max_val=None):
"""
Constructor. To filter by a specific value, set value to a number.
To filter by a range, set value to None, and set min_val and max_val
to integers.
:param value: int - exact term value (default: 36)
:param min_val: int - minimum term value (inclusive) (default: None)
:param max_val: int - maximum term value (inclusive) (default: None)
"""
if value is not None and (min_val is not None or max_val is not None):
fstr = "value and min_val, max_val are mutually exclusive"
details = "value: {}".format(value)
if min_val is not None:
details += ", min_val: {}".format(min_val)
if max_val is not None:
details += ", max_val: {}".format(max_val)
raise LCError(fstr, details=details)
if min_val is not None and max_val is not None:
            if min_val > max_val:
                fstr = "min_val cannot be greater than max_val"
raise LCError(fstr)
elif value is None and (min_val is None or max_val is None):
fstr = "invalid specification on the values"
hint = "either value or min_val + max_val combo should be specified"
raise LCError(fstr, hint=hint)
self._value = value
self._min_value = min_val
self._max_value = max_val
# pylint: enable=super-init-not-called
def meet_requirement(self, loan):
"""
Check if the loan is meeting the filter requirement
:param loan: instance of :py:class:`~lendingclub2.loan.Loan`.
:returns: boolean
"""
if self._value is not None:
return loan.term == self._value
return self._min_value <= loan.term <= self._max_value
# pylint: enable=too-few-public-methods<|fim▁end|> | |
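A short composition sketch for the filters above; `FakeLoan` is a hypothetical stand-in that only carries the attributes the filters actually read:

class FakeLoan:
    grade = "B"
    term = 36
    percent_funded = 80.0

filters = [
    FilterByGrade(grades=("A", "B")),
    FilterByTerm(value=36),
    FilterByFunded(50.0),
]
print(all(f.meet_requirement(FakeLoan()) for f in filters))  # True for this stand-in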
<|file_name|>STM32F429IDISCOVERY.py<|end_file_name|><|fim▁begin|>#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 F429 Discovery",
'link' : [ "http://www.st.com/web/catalog/tools/FM116/SC959/SS1532/LN1199/PF259090" ],
'default_console' : "EV_SERIAL1",
'variables' : 5450,
'binary_name' : 'espruino_%v_stm32f429idiscovery.bin',
};
chip = {
'part' : "STM32F429ZIT6",
'family' : "STM32F4",
'package' : "LQFP144",
'ram' : 128,#256,
'flash' : 512, #2048,
'speed' : 168,
'usart' : 6,
'spi' : 3,
'i2c' : 3,
'adc' : 3,
'dac' : 2,
};
# left-right, or top-bottom order
board = {
'left' : [ ], # fixme
'left2' : [ ],
'right2' : [ ],
'right' : [ ],
};
devices = {
'OSC' : { 'pin_1' : 'H0',
'pin_2' : 'H1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'G13' }, # green
'LED2' : { 'pin' : 'G14' }, # red
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_dm' : 'B14',
'pin_dp' : 'B15',
'pin_vbus' : 'B13',
'pin_id' : 'B12',
'pin_pso' : 'C4', # Power supply enable
'pin_oc' : 'C5', # Overcurrent
},
'MEMS' : { 'device' : 'L3GD20',
'pin_cs' : 'C1',
'pin_int1' : 'A1',
'pin_int2' : 'A2',
'pin_mosi' : 'F9',
'pin_miso' : 'F8',
'pin_sck' : 'F7' },
'TOUCHSCREEN' : {
'pin_irq' : 'A15',
'pin_cs' : '',
'pin_scl' : 'A8',
'pin_sda' : 'C9',
},
'LCD' : {
'width' : 320, 'height' : 240, 'bpp' : 16, 'controller' : 'fsmc', 'controller2' : 'ili9341',
'pin_d0' : 'D6',
'pin_d1' : 'G11',
'pin_d2' : 'G12',
'pin_d3' : 'A3',
'pin_d4' : 'B8',
'pin_d5' : 'B9',
'pin_d6' : 'A6',
'pin_d7' : 'G10',
'pin_d8' : 'B10',
'pin_d9' : 'B11',
'pin_d10' : 'C7',
'pin_d11' : 'D3',
'pin_d12' : 'C10',
'pin_d13' : 'B0',
'pin_d14' : 'A11',
'pin_d15' : 'A12',
            'pin_d16' : 'B1',
            'pin_d17' : 'G6',
'pin_rd' : 'D12', # RDX
            'pin_wr' : 'D13', # WRQ (or SPI DC - data=1/command=0)
'pin_cs' : 'C2', # SPI CS (enable=0)
'pin_en' : 'F10',
'pin_vsync' : 'A4',
'pin_hsync' : 'C6',
            'pin_dotclk' : 'G7',
'pin_dc' : 'F7', # SPI CLK
'pin_sda' : 'F9', # SPI SDI/SDO
'pin_im0' : 'D2', # solder bridge normally open, pulled to 0
'pin_im1' : 'D4', # solder bridge normally open, pulled to 1
'pin_im2' : 'D5', # solder bridge normally open, pulled to 1
'pin_im3' : 'D7', # solder bridge normally open, pulled to 0
},
'SDRAM' : {
'pin_sdcke1' : 'B5',
'pin_sdne1' : 'B6',
'pin_sdnwe' : 'C0',
'pin_d2' : 'D0',
'pin_d3' : 'D1',
'pin_d13' : 'D8',
'pin_d14' : 'D9',
'pin_d15' : 'D10',
'pin_d0' : 'D14',
'pin_d1' : 'D15',
'pin_nbl0' : 'E0',
'pin_nbl1' : 'E1',
'pin_d4' : 'E7',
'pin_d5' : 'E8',
'pin_d6' : 'E9',
'pin_d7' : 'E10',
'pin_d8' : 'E11',
'pin_d9' : 'E12',
'pin_d10' : 'E13',
'pin_d11' : 'E14',
'pin_d12' : 'E15',
'pin_a0' : 'F0',
'pin_a1' : 'F1',
'pin_a2' : 'F2',
'pin_a3' : 'F3',
'pin_a4' : 'F4',
'pin_a5' : 'F5',
'pin_sdnras' : 'F11',
'pin_a6' : 'F12',
'pin_a7' : 'F13',
'pin_a8' : 'F14',
'pin_a9' : 'F15',
'pin_a10' : 'G0',
'pin_a11' : 'G1',
'pin_ba0' : 'G4',
'pin_ba1' : 'G5',
'pin_sdclk' : 'G8',
'pin_sdncas' : 'G15',
},
'JTAG' : {
'pin_MS' : 'A13',
'pin_CK' : 'A14',
'pin_DI' : 'A15'
},
};
board_css = """
#board {
width: 680px;
height: 1020px;
left: 200px;
background-image: url(img/STM32F429IDISCOVERY.jpg);
}
#boardcontainer {
height: 1020px;
}
#left {
top: 375px;
right: 590px;
}
#left2 {
top: 375px;
left: 105px;
}
#right {
top: 375px;
left: 550px;
}
#right2 {
top: 375px;
right: 145px;
}
""";
def get_pins():<|fim▁hole|><|fim▁end|> | pins = pinutils.scan_pin_file([], 'stm32f40x.csv', 6, 9, 10)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"]) |
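The dictionaries above are plain data read by Espruino's build scripts; a trivial sketch of inspecting them (no pinutils needed):

print(info['name'], '-', chip['part'])
print('LED1 on', devices['LED1']['pin'], '/ BTN1 on', devices['BTN1']['pin'])
print('LCD:', devices['LCD']['width'], 'x', devices['LCD']['height'], '@', devices['LCD']['bpp'], 'bpp')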
<|file_name|>services.py<|end_file_name|><|fim▁begin|># Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the<|fim▁hole|># License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from django.db import connection
@transaction.atomic
def bulk_update_userstory_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_userstorycustomattribute set "order" = $1
where custom_attributes_userstorycustomattribute.id = $2 and
custom_attributes_userstorycustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_task_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_taskcustomattribute set "order" = $1
where custom_attributes_taskcustomattribute.id = $2 and
custom_attributes_taskcustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()
@transaction.atomic
def bulk_update_issue_custom_attribute_order(project, user, data):
cursor = connection.cursor()
sql = """
prepare bulk_update_order as update custom_attributes_issuecustomattribute set "order" = $1
where custom_attributes_issuecustomattribute.id = $2 and
custom_attributes_issuecustomattribute.project_id = $3;
"""
cursor.execute(sql)
for id, order in data:
cursor.execute("EXECUTE bulk_update_order (%s, %s, %s);",
(order, id, project.id))
cursor.execute("DEALLOCATE bulk_update_order")
cursor.close()<|fim▁end|> | |
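A hypothetical call site for the helpers above: `project` and `user` are Django model instances and `data` pairs each custom-attribute id with its new position. Preparing the statement once amortizes parse/plan cost across the loop's EXECUTE calls.

data = [(12, 1), (15, 2), (9, 3)]  # (custom_attribute_id, new_order)
bulk_update_userstory_custom_attribute_order(project, user, data)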
<|file_name|>use_closure_externs.js<|end_file_name|><|fim▁begin|>goog.module('test_files.use_closure_externs.use_closure_externs');var module = module || {id: 'test_files/use_closure_externs/use_closure_externs.js'};/**
* @fileoverview A source file that uses types that are used in .d.ts files, but
* that are not available or use different names in Closure's externs.<|fim▁hole|>console.log('work around TS dropping consecutive comments');
let /** @type {!NodeListOf<!HTMLParagraphElement>} */ x = document.getElementsByTagName('p');
console.log(x);
const /** @type {(null|!RegExpExecArray)} */ res = ((/asd/.exec('asd asd')));
console.log(res);<|fim▁end|> | * @suppress {checkTypes} checked by tsc
*/ |
<|file_name|>data_source_google_compute_ssl_policy.go<|end_file_name|><|fim▁begin|>package google
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
func dataSourceGoogleComputeSslPolicy() *schema.Resource {
// Generate datasource schema from resource
dsSchema := datasourceSchemaFromResourceSchema(resourceComputeSslPolicy().Schema)
// Set 'Required' schema elements
addRequiredFieldsToSchema(dsSchema, "name")
// Set 'Optional' schema elements
addOptionalFieldsToSchema(dsSchema, "project")
return &schema.Resource{
Read: datasourceComputeSslPolicyRead,
Schema: dsSchema,
}
}
func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {<|fim▁hole|> d.SetId(fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName))
return resourceComputeSslPolicyRead(d, meta)
}<|fim▁end|> | return err
}
policyName := d.Get("name").(string)
|
<|file_name|>grpc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import empty_pb2 # type: ignore
from .base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
class SessionEntityTypesGrpcTransport(SessionEntityTypesTransport):
"""gRPC backend transport for SessionEntityTypes.
Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_session_entity_types(
self,
) -> Callable[
[session_entity_type.ListSessionEntityTypesRequest],
session_entity_type.ListSessionEntityTypesResponse,
]:
r"""Return a callable for the list session entity types method over gRPC.
Returns the list of all session entity types in the
specified session.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.ListSessionEntityTypesRequest],
~.ListSessionEntityTypesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_session_entity_types" not in self._stubs:
self._stubs["list_session_entity_types"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/ListSessionEntityTypes",
request_serializer=session_entity_type.ListSessionEntityTypesRequest.serialize,
response_deserializer=session_entity_type.ListSessionEntityTypesResponse.deserialize,
)
return self._stubs["list_session_entity_types"]
@property
def get_session_entity_type(
self,
) -> Callable[
[session_entity_type.GetSessionEntityTypeRequest],
session_entity_type.SessionEntityType,
]:
r"""Return a callable for the get session entity type method over gRPC.
Retrieves the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.GetSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_session_entity_type" not in self._stubs:
self._stubs["get_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/GetSessionEntityType",
request_serializer=session_entity_type.GetSessionEntityTypeRequest.serialize,
response_deserializer=session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["get_session_entity_type"]
@property
def create_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.CreateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the create session entity type method over gRPC.
Creates a session entity type.
If the specified session entity type already exists,
overrides the session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.CreateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_session_entity_type" not in self._stubs:
self._stubs["create_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/CreateSessionEntityType",
request_serializer=gcd_session_entity_type.CreateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["create_session_entity_type"]
@property
def update_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.UpdateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the update session entity type method over gRPC.
Updates the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.UpdateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_session_entity_type" not in self._stubs:
self._stubs["update_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/UpdateSessionEntityType",
request_serializer=gcd_session_entity_type.UpdateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["update_session_entity_type"]
@property
def delete_session_entity_type(
self,
) -> Callable[
[session_entity_type.DeleteSessionEntityTypeRequest], empty_pb2.Empty
]:
r"""Return a callable for the delete session entity type method over gRPC.
Deletes the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.DeleteSessionEntityTypeRequest],
~.Empty]:
A function that, when called, will call the underlying RPC<|fim▁hole|> # the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_session_entity_type" not in self._stubs:
self._stubs["delete_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/DeleteSessionEntityType",
request_serializer=session_entity_type.DeleteSessionEntityTypeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_session_entity_type"]
def close(self):
self.grpc_channel.close()
__all__ = ("SessionEntityTypesGrpcTransport",)<|fim▁end|> | on the server.
"""
# Generate a "stub function" on-the-fly which will actually make |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin
from achievs.models import Achievement
# from achievs.models import Gold
# from achievs.models import Silver
# from achievs.models import Bronze
# from achievs.models import Platinum
from achievs.models import Level
# class PlatinumInline(admin.StackedInline):
# model=Platinum
# class GoldInline(admin.StackedInline):
# model=Gold
# class SilverInline(admin.StackedInline):
# model=Silver
# class BronzeInline(admin.StackedInline):
# model=Bronze
class LevelInline(admin.StackedInline):
model=Level
class AchievementAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
('Date information', {'fields': ['pub_date']}),
]
#inlines=[GoldInline, SilverInline, BronzeInline, PlatinumInline]
inlines=[LevelInline]
list_display = ('name', 'pub_date')<|fim▁hole|> list_filter=['pub_date']
search_fields=['name']
date_hierarchy='pub_date'
# admin.site.register(Gold)
# admin.site.register(Silver)
# admin.site.register(Bronze)
# admin.site.register(Platinum)
admin.site.register(Level)
admin.site.register(Achievement, AchievementAdmin)<|fim▁end|> | |
<|file_name|>Ui_MainWindow.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\github repos\Python\A05_SimplyGame\Binaries\MyView.ui'
#
# Created: Tue Oct 25 22:22:12 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(808, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(240, 110, 561, 281))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setHorizontalSpacing(7)
self.gridLayout.setVerticalSpacing(9)
self.gridLayout.setObjectName("gridLayout")
self.pushButton_5 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 0, 4, 1, 1)
self.pushButton_1 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_1.setObjectName("pushButton_1")
self.gridLayout.addWidget(self.pushButton_1, 0, 0, 1, 1)
self.pushButton_9 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_9.setObjectName("pushButton_9")
self.gridLayout.addWidget(self.pushButton_9, 1, 3, 1, 1)
self.pushButton_6 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 1, 0, 1, 1)
self.pushButton_10 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_10.setObjectName("pushButton_10")
self.gridLayout.addWidget(self.pushButton_10, 1, 4, 1, 1)
self.pushButton_15 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_15.setObjectName("pushButton_15")
self.gridLayout.addWidget(self.pushButton_15, 2, 4, 1, 1)
self.pushButton_4 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 0, 3, 1, 1)
self.pushButton_11 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_11.setObjectName("pushButton_11")
self.gridLayout.addWidget(self.pushButton_11, 2, 0, 1, 1)
self.pushButton_12 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_12.setObjectName("pushButton_12")
self.gridLayout.addWidget(self.pushButton_12, 2, 1, 1, 1)
self.pushButton_7 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 1, 1, 1, 1)
self.pushButton_3 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 0, 2, 1, 1)
self.pushButton_13 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_13.setObjectName("pushButton_13")
self.gridLayout.addWidget(self.pushButton_13, 2, 2, 1, 1)
self.pushButton_8 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_8.setObjectName("pushButton_8")
self.gridLayout.addWidget(self.pushButton_8, 1, 2, 1, 1)
self.pushButton_14 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_14.setObjectName("pushButton_14")
self.gridLayout.addWidget(self.pushButton_14, 2, 3, 1, 1)
self.pushButton_2 = QtGui.QPushButton(self.gridLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 0, 1, 1, 1)
self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(50, 70, 191, 481))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.label = QtGui.QLabel(self.formLayoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.label_2 = QtGui.QLabel(self.formLayoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtGui.QLabel(self.formLayoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtGui.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_4)
self.label_5 = QtGui.QLabel(self.formLayoutWidget)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(12, QtGui.QFormLayout.LabelRole, self.label_5)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(1, QtGui.QFormLayout.LabelRole, spacerItem)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(5, QtGui.QFormLayout.LabelRole, spacerItem1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(8, QtGui.QFormLayout.LabelRole, spacerItem2)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(11, QtGui.QFormLayout.LabelRole, spacerItem3)
spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(2, QtGui.QFormLayout.LabelRole, spacerItem4)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(4, QtGui.QFormLayout.LabelRole, spacerItem5)
spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(7, QtGui.QFormLayout.LabelRole, spacerItem6)
spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(10, QtGui.QFormLayout.LabelRole, spacerItem7)
self.label_6 = QtGui.QLabel(self.formLayoutWidget)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.label_6)
self.label_7 = QtGui.QLabel(self.formLayoutWidget)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.label_7)
self.label_8 = QtGui.QLabel(self.formLayoutWidget)
self.label_8.setObjectName("label_8")
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.label_8)
self.label_9 = QtGui.QLabel(self.formLayoutWidget)
self.label_9.setObjectName("label_9")
<|fim▁hole|> self.formLayout.setWidget(12, QtGui.QFormLayout.FieldRole, self.label_10)
self.gridLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(240, 390, 561, 161))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem8, 1, 2, 1, 1)
self.pushButton_24 = QtGui.QPushButton(self.gridLayoutWidget_2)
self.pushButton_24.setObjectName("pushButton_24")
self.gridLayout_2.addWidget(self.pushButton_24, 1, 1, 1, 1)
self.pushButton_25 = QtGui.QPushButton(self.gridLayoutWidget_2)
self.pushButton_25.setObjectName("pushButton_25")
self.gridLayout_2.addWidget(self.pushButton_25, 1, 3, 1, 1)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem9, 1, 4, 1, 1)
spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem10, 1, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(240, 0, 561, 111))
self.label_11.setObjectName("label_11")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 808, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton_25, QtCore.SIGNAL("clicked()"), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_5.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_1.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_9.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_6.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_10.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_15.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_4.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_11.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_12.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_7.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_3.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_13.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_8.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_14.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "offen:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "korrekt:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "falsch:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "gesamt:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Spiele:", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("MainWindow", "offenAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("MainWindow", "korrektAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("MainWindow", "falschAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("MainWindow", "gesamtAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("MainWindow", "spieleAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_24.setText(QtGui.QApplication.translate("MainWindow", "Neu", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_25.setText(QtGui.QApplication.translate("MainWindow", "Ende", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">Drücken Sie die Buttons in aufsteigender Reihenfolge</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))<|fim▁end|> | self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.label_9)
self.label_10 = QtGui.QLabel(self.formLayoutWidget)
self.label_10.setObjectName("label_10")
|
<|file_name|>eip150_state.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.<|fim▁hole|>use super::state::json_chain_test;
fn do_json_test(json_data: &[u8]) -> Vec<String> {
json_chain_test(json_data, ChainEra::Eip150)
}
declare_test!{StateTests_EIP150_stEIPSpecificTest, "StateTests/EIP150/stEIPSpecificTest"}
declare_test!{StateTests_EIP150_stEIPsingleCodeGasPrices, "StateTests/EIP150/stEIPsingleCodeGasPrices"}
declare_test!{StateTests_EIP150_stMemExpandingEIPCalls, "StateTests/EIP150/stMemExpandingEIPCalls"}
declare_test!{StateTests_EIP150_stCallCodes, "StateTests/EIP150/Homestead/stCallCodes"}
declare_test!{StateTests_EIP150_stCallCreateCallCodeTest, "StateTests/EIP150/Homestead/stCallCreateCallCodeTest"}
declare_test!{StateTests_EIP150_stDelegatecallTest, "StateTests/EIP150/Homestead/stDelegatecallTest"}
declare_test!{StateTests_EIP150_stInitCodeTest, "StateTests/EIP150/Homestead/stInitCodeTest"}
declare_test!{StateTests_EIP150_stLogTests, "StateTests/EIP150/Homestead/stLogTests"}
declare_test!{heavy => StateTests_EIP150_stMemoryStressTest, "StateTests/EIP150/Homestead/stMemoryStressTest"}
declare_test!{heavy => StateTests_EIP150_stMemoryTest, "StateTests/EIP150/Homestead/stMemoryTest"}
declare_test!{StateTests_EIP150_stPreCompiledContracts, "StateTests/EIP150/Homestead/stPreCompiledContracts"}
declare_test!{heavy => StateTests_EIP150_stQuadraticComplexityTest, "StateTests/EIP150/Homestead/stQuadraticComplexityTest"}
declare_test!{StateTests_EIP150_stRecursiveCreate, "StateTests/EIP150/Homestead/stRecursiveCreate"}
declare_test!{StateTests_EIP150_stRefundTest, "StateTests/EIP150/Homestead/stRefundTest"}
declare_test!{StateTests_EIP150_stSpecialTest, "StateTests/EIP150/Homestead/stSpecialTest"}
declare_test!{StateTests_EIP150_stSystemOperationsTest, "StateTests/EIP150/Homestead/stSystemOperationsTest"}
declare_test!{StateTests_EIP150_stTransactionTest, "StateTests/EIP150/Homestead/stTransactionTest"}
declare_test!{StateTests_EIP150_stWalletTest, "StateTests/EIP150/Homestead/stWalletTest"}<|fim▁end|> |
use super::test_common::*;
use tests::helpers::*; |
<|file_name|>bloom-cmd.py<|end_file_name|><|fim▁begin|>#!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
import glob, os, sys, tempfile
from bup import options, git, bloom
from bup.helpers import (add_error, debug1, handle_ctrl_c, log, progress, qprogress,
saved_errors)
optspec = """
bup bloom [options...]
--
ruin ruin the specified bloom file (clearing the bitfield)
f,force ignore existing bloom file and regenerate it from scratch
o,output= output bloom filename (default: auto)
d,dir= input directory to look for idx files (default: auto)
k,hashes= number of hash functions to use (4 or 5) (default: auto)
c,check= check the given .idx file against the bloom filter
"""
def ruin_bloom(bloomfilename):
rbloomfilename = git.repo_rel(bloomfilename)
if not os.path.exists(bloomfilename):
log("%s\n" % bloomfilename)
add_error("bloom: %s not found to ruin\n" % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
b.map[16:16+2**b.bits] = '\0' * 2**b.bits
def check_bloom(path, bloomfilename, idx):
rbloomfilename = git.repo_rel(bloomfilename)
ridx = git.repo_rel(idx)
if not os.path.exists(bloomfilename):
log("bloom: %s: does not exist.\n" % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename)
if not b.valid():
add_error("bloom: %r is invalid.\n" % rbloomfilename)
return
base = os.path.basename(idx)
if base not in b.idxnames:
log("bloom: %s does not contain the idx.\n" % rbloomfilename)
return
if base == idx:
idx = os.path.join(path, idx)
log("bloom: bloom file: %s\n" % rbloomfilename)
log("bloom: checking %s\n" % ridx)
for objsha in git.open_idx(idx):
if not b.exists(objsha):
add_error("bloom: ERROR: object %s missing"
% str(objsha).encode('hex'))
_first = None
def do_bloom(path, outfilename):
global _first
b = None
if os.path.exists(outfilename) and not opt.force:
b = bloom.ShaBloom(outfilename)
if not b.valid():
debug1("bloom: Existing invalid bloom found, regenerating.\n")
b = None
add = []
rest = []
add_count = 0
rest_count = 0
for i,name in enumerate(glob.glob('%s/*.idx' % path)):
progress('bloom: counting: %d\r' % i)
ix = git.open_idx(name)
ixbase = os.path.basename(name)
if b and (ixbase in b.idxnames):
rest.append(name)
rest_count += len(ix)
else:
add.append(name)
add_count += len(ix)
total = add_count + rest_count
if not add:
debug1("bloom: nothing to do.\n")
return
if b:
if len(b) != rest_count:
debug1("bloom: size %d != idx total %d, regenerating\n"
% (len(b), rest_count))
b = None
elif (b.bits < bloom.MAX_BLOOM_BITS and
b.pfalse_positive(add_count) > bloom.MAX_PFALSE_POSITIVE):
debug1("bloom: regenerating: adding %d entries gives "
"%.2f%% false positives.\n"
% (add_count, b.pfalse_positive(add_count)))
b = None
else:
b = bloom.ShaBloom(outfilename, readwrite=True, expected=add_count)
if not b: # Need all idxs to build from scratch
add += rest
add_count += rest_count
del rest
del rest_count
msg = b is None and 'creating from' or 'adding'
if not _first: _first = path
dirprefix = (_first != path) and git.repo_rel(path)+': ' or ''
progress('bloom: %s%s %d file%s (%d object%s).\n'
% (dirprefix, msg,
len(add), len(add)!=1 and 's' or '',
add_count, add_count!=1 and 's' or ''))
tfname = None
if b is None:
tfname = os.path.join(path, 'bup.tmp.bloom')
b = bloom.create(tfname, expected=add_count, k=opt.k)
count = 0
icount = 0
for name in add:
ix = git.open_idx(name)
qprogress('bloom: writing %.2f%% (%d/%d objects)\r'
% (icount*100.0/add_count, icount, add_count))
b.add_idx(ix)
count += 1
icount += len(ix)
# Currently, there's an open file object for tfname inside b.
# Make sure it's closed before rename.
b.close()
if tfname:
os.rename(tfname, outfilename)
handle_ctrl_c()
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if extra:
o.fatal('no positional parameters expected')
git.check_repo_or_die()
if not opt.check and opt.k and opt.k not in (4,5):
o.fatal('only k values of 4 and 5 are supported')
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
debug1('bloom: scanning %s\n' % path)
outfilename = opt.output or os.path.join(path, 'bup.bloom')
if opt.check:
check_bloom(path, outfilename, opt.check)
elif opt.ruin:
ruin_bloom(outfilename)<|fim▁hole|> log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors))
sys.exit(1)
elif opt.check:
log('All tests passed.\n')<|fim▁end|> | else:
do_bloom(path, outfilename)
if saved_errors: |
<|file_name|>anon_trait_static_method_exe.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_camel_case_types)]
// aux-build:anon_trait_static_method_lib.rs
extern crate anon_trait_static_method_lib;
use anon_trait_static_method_lib::Foo;
pub fn main() {<|fim▁hole|><|fim▁end|> | let x = Foo::new();
println!("{}", x.x);
} |
<|file_name|>datalib.py<|end_file_name|><|fim▁begin|>"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from collections import defaultdict
from structlog import get_logger
<|fim▁hole|>
logger = get_logger()
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
self.pathExpression = name
def __eq__(self, other):
if isinstance(other, TimeSeries):
color_eq = True
if hasattr(self, 'color'):
if hasattr(other, 'color'):
color_eq = (self.color == other.color)
else:
color_eq = False
elif hasattr(other, 'color'):
color_eq = False
return ((self.name, self.start, self.step, self.consolidationFunc,
self.valuesPerPoint, self.options) ==
(other.name, other.start, other.step,
other.consolidationFunc, other.valuesPerPoint,
other.options)) and list.__eq__(self, other) and color_eq
return False
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator(list.__iter__(self))
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
else:
yield None
return
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable:
return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception(
"Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (
self.name, self.start, self.end, self.step)
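# A sketch of the consolidation behavior above (values are made up): six
# points consolidated three-to-one with the default 'average' function
# collapse to two points, with None gaps dropped before averaging.
#
#   ts = TimeSeries('metric.a', 0, 60, 10, [1, 2, None, 4, 5, 6])
#   ts.consolidate(3)
#   list(ts)  # -> [1.5, 5.0]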
class DataStore(object):
"""
Simple object to store results of multi fetches.
Also aids in looking up data by pathExpressions.
"""
def __init__(self):
self.paths = defaultdict(set)
self.data = defaultdict(list)
def get_paths(self, path_expr):
"""
Returns all paths found for path_expr
"""
return sorted(self.paths[path_expr])
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
# Don't add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
})
def get_series_list(self, path_expr):
series_list = []
for path in self.get_paths(path_expr):
for data in self.data.get(path):
start, end, step = data['time_info']
series = TimeSeries(path, start, end, step, data['values'])
series.pathExpression = path_expr
series_list.append(series)
return series_list
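# A minimal DataStore flow (hypothetical metric names):
#
#   store = DataStore()
#   store.add_data('servers.a.cpu', (0, 60, 10), [1, 2, 3],
#                  ['servers.*.cpu'])
#   store.get_paths('servers.*.cpu')        # -> ['servers.a.cpu']
#   store.get_series_list('servers.*.cpu')
#   # -> [TimeSeries(name=servers.a.cpu, start=0, end=60, step=10)]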
def fetchData(requestContext, pathExprs):
from ..app import app
startTime = int(epoch(requestContext['startTime']))
endTime = int(epoch(requestContext['endTime']))
if 'now' in requestContext:
now = int(epoch(requestContext['now']))
else:
now = None
# Convert to list if given single path
if not isinstance(pathExprs, list):
pathExprs = [pathExprs]
data_store = DataStore()
multi_nodes = defaultdict(list)
single_nodes = []
path_to_exprs = defaultdict(list)
# Group nodes that support multiple fetches
for pathExpr in pathExprs:
for node in app.store.find(pathExpr, startTime, endTime):
if not node.is_leaf:
continue
if node.path not in path_to_exprs:
if hasattr(node, '__fetch_multi__'):
multi_nodes[node.__fetch_multi__].append(node)
else:
single_nodes.append(node)
path_to_exprs[node.path].append(pathExpr)
# Multi fetches
for finder in app.store.finders:
if not hasattr(finder, '__fetch_multi__'):
continue
nodes = multi_nodes[finder.__fetch_multi__]
if not nodes:
continue
try:
time_info, series = finder.fetch_multi(nodes, startTime, endTime,
now, requestContext)
except TypeError:
time_info, series = finder.fetch_multi(nodes, startTime, endTime)
for path, values in series.items():
data_store.add_data(path, time_info, values,
path_to_exprs[path])
# Single fetches
fetches = [
(node.path, node.fetch(startTime, endTime, now, requestContext))
for node in single_nodes
]
for path, results in fetches:
if not results:
logger.info("no results", path=path, start=startTime,
end=endTime)
continue
try:
time_info, values = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric "
"'%s': %s" % (path, e))
data_store.add_data(path, time_info, values, path_to_exprs[path])
return data_store
def nonempty(series):
for value in series:
if value is not None:
return True
return False<|fim▁end|> | from ..utils import epoch |
<|file_name|>cnetwork_tree_node.py<|end_file_name|><|fim▁begin|>""" Specify the NetworkNode with its action, context-menus """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
import os
# Enthought library imports
from traits.api import Instance, Str, Any
from traitsui.api import TreeNode
from traitsui.menu import Menu, Action, Separator
# ConnectomeViewer imports
from cviewer.plugins.cff2.cnetwork import CNetwork
# Logging import
import logging
logger = logging.getLogger('root.'+__name__)
class CNetworkTreeNode(TreeNode):
# The object that contains the container ;^)
parent = Any
# the network associated with this node
node_for=[CNetwork]
<|fim▁hole|> # Name of leaf item icon
icon_item=Str('home.png')
# Name of opened group item icon
icon_open=Str('home.png')
# labels
label='dname'
###
# Private Traits
# activate / deactivate logic
# if the node is activated, this means that there exists a
# corresponding RenderManager instance
_ShowName = Instance(Action,
kw={'name': 'Show name',
'action': 'object.show_name',
'tooltip': 'Shows the network name'}, )
_ChangeParameters = Instance(Action,
kw={'name': 'Edge Parameters',
'action': 'object._edge_parameters',
'tooltip': 'Thresholding and Change Attributes',
'enabled_when' : 'object.loaded == True'}, )
_RenderMatrixAction = Instance(Action,
kw={'name': 'Connectome Matrix Viewer',
'action': 'object.invoke_matrix_viewer',
'tooltip':'View the connectivity matrices',
'enabled_when':'object.loaded == True'}, )
# the menu shown after right-click
menu = Instance(Menu, transient=True)
def get_children(self, object):
""" Get the object's children. """
pass
# Collate the window's views into categories.
#return object.surfaces + object.volumes + object.tracks
######################################################################
# Non-public interface
######################################################################
def _menu_default(self):
""" Standard menus for network nodes """
menu_actions = []
return Menu( *menu_actions)<|fim▁end|> | # a default icons
# Name of group item icon
icon_group = Str('home.png') |
<|file_name|>cards.js<|end_file_name|><|fim▁begin|>// Models
app.SearchModel = Backbone.Model.extend({
idAttribute: "session_token",<|fim▁hole|> }
});
// Collections
app.SearchCollection = Backbone.Collection.extend({
model: app.SearchModel,
url: function() {
if (typeof this.id === 'undefined')
return '/search';
else
return '/search/' + this.id;
},
initialize: function(options) {
if (typeof options != 'undefined')
this.id = options.session_token;
}
});
// Views
app.cardList = Backbone.View.extend({
el: '#cardList'
});
app.cardView = Backbone.View.extend({
tagName: 'div',
initialize: function(card) {
this.card = card;
},
template: _.template($("#card-template").html()),
render: function(cardList) {
this.$el.html(this.template({
card: this.card
}));
this.$el.addClass('card');
cardList.$el.append(this.el);
return this;
}
});<|fim▁end|> | urlRoot: function() {
var u = '/search/' + this.id;
return u; |
<|file_name|>utility.ts<|end_file_name|><|fim▁begin|>const boolType = "boolean"
const funType = "function";
const stringType = "string";
export function isFunction(obj: any) {
if (!obj)
return null;
return typeof obj === funType;
}
<|fim▁hole|> return obj === undefined;
}
export function getFullDateFormated() {
function getTime(timeDigit: number) {
return timeDigit < 10 ? `0${timeDigit}` : `${timeDigit}`;
}
const date = new Date();
return `${date.getFullYear()}-${date.getMonth() + 1}-${date.getDate()}-${getTime(date.getHours())}:${getTime(date.getMinutes())}`;
}
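// Example (illustrative): at 2016-10-25 22:05 local time this returns
// "2016-10-25-22:05".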
export function isNull(obj: any) {
return obj === null;
}
export function isBoolean(obj: any) {
// note: no falsy guard here -- isBoolean(false) must return true
return typeof obj === boolType;
}
export function isString(obj: any) {
// note: no falsy guard here -- isString("") must return true
return typeof obj === stringType;
}
export function getTemplate(htmlTemplate: string) {
return function (data: any) {
let keys = Object.keys(data);
for (let i = 0; i < keys.length; i++) {
let reg = new RegExp((<any>getTemplate).interpolateReg.replace("key", keys[i]), 'g');
htmlTemplate = htmlTemplate.replace(reg, data[keys[i]]);
}
return htmlTemplate;
}
}
(<any>getTemplate).interpolateReg = "{{key}}";
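// Usage sketch (illustrative): compile a template once, then render it
// against a data object; each {{key}} is replaced globally.
//
//   const render = getTemplate("<h1>{{title}}</h1>");
//   render({ title: "Hello" });  // -> "<h1>Hello</h1>"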
export function getHtmlAsString(htmlPath: any) {
let fs = require('fs');
let path = require('path');
let stringData = fs.readFileSync(path.resolve(`${__dirname}/${htmlPath}`), 'utf-8');
return stringData;
}<|fim▁end|> | export function isUndefined(obj: any) { |
<|file_name|>Map.ts<|end_file_name|><|fim▁begin|>import {Sequelize} from 'sequelize';
import {DataTypes} from 'sequelize';
export default function (sequelize: Sequelize, DataTypes: DataTypes) {
let Map = sequelize.define('Map', {
uid: {
type: DataTypes.STRING,
allowNull: false
},
name: {
type: DataTypes.STRING,
allowNull: false
},
author: {
type: DataTypes.STRING,
allowNull: false
},
environment: {
type: DataTypes.STRING,
allowNull: false
}
}, {<|fim▁hole|> return Map;
}<|fim▁end|> | tableName: 'core__map',
charset: 'utf8'
});
|
<|file_name|>keyboard_win32_unittests.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "flutter/shell/platform/common/json_message_codec.h"
#include "flutter/shell/platform/embedder/embedder.h"
#include "flutter/shell/platform/embedder/test_utils/key_codes.h"
#include "flutter/shell/platform/windows/flutter_windows_engine.h"
#include "flutter/shell/platform/windows/flutter_windows_view.h"
#include "flutter/shell/platform/windows/keyboard_key_channel_handler.h"
#include "flutter/shell/platform/windows/keyboard_key_embedder_handler.h"
#include "flutter/shell/platform/windows/keyboard_key_handler.h"
#include "flutter/shell/platform/windows/keyboard_manager_win32.h"
#include "flutter/shell/platform/windows/testing/engine_modifier.h"
#include "flutter/shell/platform/windows/testing/mock_window_binding_handler.h"
#include "flutter/shell/platform/windows/testing/test_keyboard.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
#include <functional>
#include <list>
#include <vector>
using testing::_;
using testing::Invoke;
using testing::Return;
using namespace ::flutter::testing::keycodes;
namespace flutter {
namespace testing {
namespace {
constexpr SHORT kStateMaskToggled = 0x01;
constexpr SHORT kStateMaskPressed = 0x80;
constexpr uint64_t kScanCodeBackquote = 0x29;
constexpr uint64_t kScanCodeKeyA = 0x1e;
constexpr uint64_t kScanCodeKeyB = 0x30;
constexpr uint64_t kScanCodeKeyE = 0x12;
constexpr uint64_t kScanCodeKeyF = 0x21;
constexpr uint64_t kScanCodeKeyO = 0x18;
constexpr uint64_t kScanCodeKeyQ = 0x10;
constexpr uint64_t kScanCodeKeyW = 0x11;
constexpr uint64_t kScanCodeDigit1 = 0x02;
constexpr uint64_t kScanCodeDigit6 = 0x07;
// constexpr uint64_t kScanCodeNumpad1 = 0x4f;
// constexpr uint64_t kScanCodeNumLock = 0x45;
constexpr uint64_t kScanCodeControl = 0x1d;
constexpr uint64_t kScanCodeMetaLeft = 0x5b;
constexpr uint64_t kScanCodeMetaRight = 0x5c;
constexpr uint64_t kScanCodeAlt = 0x38;
constexpr uint64_t kScanCodeShiftLeft = 0x2a;
constexpr uint64_t kScanCodeShiftRight = 0x36;
constexpr uint64_t kScanCodeBracketLeft = 0x1a;
constexpr uint64_t kScanCodeArrowLeft = 0x4b;
constexpr uint64_t kScanCodeEnter = 0x1c;
constexpr uint64_t kScanCodeBackspace = 0x0e;
constexpr uint64_t kVirtualDigit1 = 0x31;
constexpr uint64_t kVirtualKeyA = 0x41;
constexpr uint64_t kVirtualKeyB = 0x42;
constexpr uint64_t kVirtualKeyE = 0x45;
constexpr uint64_t kVirtualKeyF = 0x46;
constexpr uint64_t kVirtualKeyO = 0x4f;
constexpr uint64_t kVirtualKeyQ = 0x51;
constexpr uint64_t kVirtualKeyW = 0x57;
constexpr bool kSynthesized = true;
constexpr bool kNotSynthesized = false;
typedef UINT (*MapVirtualKeyLayout)(UINT uCode, UINT uMapType);
typedef std::function<UINT(UINT)> MapVirtualKeyToChar;
UINT LayoutDefault(UINT uCode, UINT uMapType) {
return MapVirtualKey(uCode, uMapType);
}
UINT LayoutFrench(UINT uCode, UINT uMapType) {
switch (uMapType) {
case MAPVK_VK_TO_CHAR:
switch (uCode) {
case 0xDD:
return 0x8000005E;
default:
return MapVirtualKey(uCode, MAPVK_VK_TO_CHAR);
}
default:
return MapVirtualKey(uCode, uMapType);
}
}
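// (Note on LayoutFrench above: 0x8000005E is MapVirtualKey's dead-key
// encoding -- the high bit flags "dead", the low word is '^' (0x5E), which
// VK 0xDD (VK_OEM_6) produces on the French layout.)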
class TestKeyboardManagerWin32 : public KeyboardManagerWin32 {
public:
explicit TestKeyboardManagerWin32(WindowDelegate* delegate)
: KeyboardManagerWin32(delegate) {}
bool DuringRedispatch() { return during_redispatch_; }
protected:
void RedispatchEvent(std::unique_ptr<PendingEvent> event) override {
assert(!during_redispatch_);
during_redispatch_ = true;
KeyboardManagerWin32::RedispatchEvent(std::move(event));
during_redispatch_ = false;
}
private:
bool during_redispatch_ = false;
};
// Injecting this kind of keyboard change means that a key state (the true
// state for a key, typically a modifier) should be changed.
struct KeyStateChange {
uint32_t key;
bool pressed;
bool toggled_on;
};
// Injecting this kind of keyboard change does not make any changes to the
// keyboard system, but indicates that a forged event is expected here, and
// that `KeyStateChange`s after this will be applied only after the forged
// event.
//
// See `IsKeyDownAltRight` for an explanation of forged events.
struct ExpectForgedMessage {
explicit ExpectForgedMessage(Win32Message message) : message(message){};
Win32Message message;
};
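// A sketch of how these injections compose (values are illustrative, not
// from a real test): expect a forged CtrlLeft down, and only then mark the
// key as pressed in the emulated key state.
//
//   tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
//       ExpectForgedMessage{
//           WmKeyDownInfo{VK_LCONTROL, kScanCodeControl, kNotExtended, kWasUp}
//               .Build(kWmResultZero)},
//       KeyStateChange{VK_LCONTROL, true, false},
//   });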
struct KeyboardChange {
// The constructors are intentionally for implicit conversion.
KeyboardChange(Win32Message message) : type(kMessage) {
content.message = message;
}
KeyboardChange(KeyStateChange change) : type(kKeyStateChange) {
content.key_state_change = change;
}
KeyboardChange(ExpectForgedMessage forged_message)
: type(kExpectForgedMessage) {
content.expected_forged_message = forged_message.message;
}
enum Type {
kMessage,
kKeyStateChange,
kExpectForgedMessage,
} type;
union {
Win32Message message;
KeyStateChange key_state_change;
Win32Message expected_forged_message;
} content;
};
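// Emulated GetKeyState table for the mock delegate: bit 0x80 (pressed) and
// bit 0x01 (toggled) mirror kStateMaskPressed / kStateMaskToggled above.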
class TestKeystate {
public:
void Set(uint32_t virtual_key, bool pressed, bool toggled_on = false) {
state_[virtual_key] = (pressed ? kStateMaskPressed : 0) |
(toggled_on ? kStateMaskToggled : 0);
}
SHORT Get(uint32_t virtual_key) { return state_[virtual_key]; }
private:
std::map<uint32_t, SHORT> state_;
};
class MockKeyboardManagerWin32Delegate
: public KeyboardManagerWin32::WindowDelegate,
protected MockMessageQueue {
public:
MockKeyboardManagerWin32Delegate(WindowBindingHandlerDelegate* view,
MapVirtualKeyToChar map_vk_to_char)
: view_(view), map_vk_to_char_(std::move(map_vk_to_char)) {
keyboard_manager_ = std::make_unique<TestKeyboardManagerWin32>(this);
}
virtual ~MockKeyboardManagerWin32Delegate() {}
// |KeyboardManagerWin32::WindowDelegate|
void OnKey(int key,
int scancode,
int action,
char32_t character,
bool extended,
bool was_down,
KeyEventCallback callback) override {
view_->OnKey(key, scancode, action, character, extended, was_down,
callback);
}
// |KeyboardManagerWin32::WindowDelegate|
void OnText(const std::u16string& text) override { view_->OnText(text); }
SHORT GetKeyState(int virtual_key) { return key_state_.Get(virtual_key); }
void InjectKeyboardChanges(std::vector<KeyboardChange> changes) {
// First queue all messages to enable peeking.
for (const KeyboardChange& change : changes) {
switch (change.type) {
case KeyboardChange::kMessage:
PushBack(&change.content.message);
break;
default:
break;
}
}
for (const KeyboardChange& change : changes) {
switch (change.type) {
case KeyboardChange::kMessage:
DispatchFront();
break;
case KeyboardChange::kExpectForgedMessage:
forged_message_expectations_.push_back(ForgedMessageExpectation{
.message = change.content.expected_forged_message,
});
break;
case KeyboardChange::kKeyStateChange: {
const KeyStateChange& state_change = change.content.key_state_change;
if (forged_message_expectations_.empty()) {
key_state_.Set(state_change.key, state_change.pressed,
state_change.toggled_on);
} else {
forged_message_expectations_.back()
.state_changes_afterwards.push_back(state_change);
}
break;
}
default:
assert(false);
}
}
}
std::list<Win32Message>& RedispatchedMessages() {
return redispatched_messages_;
}
protected:
BOOL Win32PeekMessage(LPMSG lpMsg,
UINT wMsgFilterMin,
UINT wMsgFilterMax,
UINT wRemoveMsg) override {
return MockMessageQueue::Win32PeekMessage(lpMsg, wMsgFilterMin,
wMsgFilterMax, wRemoveMsg);
}
uint32_t Win32MapVkToChar(uint32_t virtual_key) override {
return map_vk_to_char_(virtual_key);
}
// This method is called for each message injected by test cases with
// `tester.InjectMessages`.
LRESULT Win32SendMessage(UINT const message,
WPARAM const wparam,
LPARAM const lparam) override {
return keyboard_manager_->HandleMessage(message, wparam, lparam)
? 0
: kWmResultDefault;
}
// This method is called when the keyboard manager redispatches messages
// or forges messages (such as CtrlLeft up right before AltGr up).
UINT Win32DispatchMessage(UINT Msg, WPARAM wParam, LPARAM lParam) override {
bool handled = keyboard_manager_->HandleMessage(Msg, wParam, lParam);
if (keyboard_manager_->DuringRedispatch()) {
redispatched_messages_.push_back(Win32Message{
.message = Msg,
.wParam = wParam,
.lParam = lParam,
});
EXPECT_FALSE(handled);
} else {
EXPECT_FALSE(forged_message_expectations_.empty());
ForgedMessageExpectation expectation =
forged_message_expectations_.front();
forged_message_expectations_.pop_front();
EXPECT_EQ(expectation.message.message, Msg);
EXPECT_EQ(expectation.message.wParam, wParam & 0xffffffff);
EXPECT_EQ(expectation.message.lParam, lParam & 0xffffffff);
if (expectation.message.expected_result != kWmResultDontCheck) {
EXPECT_EQ(expectation.message.expected_result,
handled ? kWmResultZero : kWmResultDefault);
}
for (const KeyStateChange& change :
expectation.state_changes_afterwards) {
key_state_.Set(change.key, change.pressed, change.toggled_on);
}
}
return 0;
}
private:
struct ForgedMessageExpectation {
Win32Message message;
std::list<KeyStateChange> state_changes_afterwards;
};
WindowBindingHandlerDelegate* view_;
std::unique_ptr<TestKeyboardManagerWin32> keyboard_manager_;
std::list<ForgedMessageExpectation> forged_message_expectations_;
MapVirtualKeyToChar map_vk_to_char_;
TestKeystate key_state_;
std::list<Win32Message> redispatched_messages_;
};
// A FlutterWindowsView that overrides the RegisterKeyboardHandlers function
// to register the keyboard hook handlers that can be spied upon.
class TestFlutterWindowsView : public FlutterWindowsView {
public:
typedef std::function<void(const std::u16string& text)> U16StringHandler;
TestFlutterWindowsView(
U16StringHandler on_text,
KeyboardKeyEmbedderHandler::GetKeyStateHandler get_keyboard_state,
KeyboardKeyEmbedderHandler::MapVirtualKeyToScanCode map_vk_to_scan)
// The WindowBindingHandler is used for window size and such, and doesn't
// affect keyboard.
: FlutterWindowsView(
std::make_unique<::testing::NiceMock<MockWindowBindingHandler>>()),
get_keyboard_state_(std::move(get_keyboard_state)),
map_vk_to_scan_(std::move(map_vk_to_scan)),
on_text_(std::move(on_text)) {}
void OnText(const std::u16string& text) override { on_text_(text); }
void HandleMessage(const char* channel,
const char* method,
const char* args) {
rapidjson::Document args_doc;
args_doc.Parse(args);
assert(!args_doc.HasParseError());
rapidjson::Document message_doc(rapidjson::kObjectType);
auto& allocator = message_doc.GetAllocator();
message_doc.AddMember("method", rapidjson::Value(method, allocator),
allocator);
message_doc.AddMember("args", args_doc, allocator);
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
message_doc.Accept(writer);
std::unique_ptr<std::vector<uint8_t>> data =
JsonMessageCodec::GetInstance().EncodeMessage(message_doc);
FlutterPlatformMessageResponseHandle response_handle;
const FlutterPlatformMessage message = {
sizeof(FlutterPlatformMessage), // struct_size
channel, // channel
data->data(), // message
data->size(), // message_size
&response_handle, // response_handle
};
GetEngine()->HandlePlatformMessage(&message);
}
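// Illustrative call (the channel/method/args strings are assumptions):
//   view.HandleMessage("flutter/textinput", "TextInput.setClient",
//                      "[108, {\"inputAction\": \"TextInputAction.none\"}]");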
protected:
std::unique_ptr<KeyboardHandlerBase> CreateKeyboardKeyHandler(
BinaryMessenger* messenger,
KeyboardKeyEmbedderHandler::GetKeyStateHandler get_key_state,
KeyboardKeyEmbedderHandler::MapVirtualKeyToScanCode map_vk_to_scan)
override {
return FlutterWindowsView::CreateKeyboardKeyHandler(
messenger,
[this](int virtual_key) { return get_keyboard_state_(virtual_key); },
[this](int virtual_key, bool extended) {
return map_vk_to_scan_(virtual_key, extended);
});
}
private:
U16StringHandler on_text_;
KeyboardKeyEmbedderHandler::GetKeyStateHandler get_keyboard_state_;
KeyboardKeyEmbedderHandler::MapVirtualKeyToScanCode map_vk_to_scan_;
};
typedef struct {
enum {
kKeyCallOnKey,
kKeyCallOnText,
kKeyCallTextMethodCall,
} type;
// Only one of the following fields should be assigned.
FlutterKeyEvent key_event; // For kKeyCallOnKey
std::u16string text; // For kKeyCallOnText
std::string text_method_call; // For kKeyCallTextMethodCall
} KeyCall;
static std::vector<KeyCall> key_calls;
void clear_key_calls() {
for (KeyCall& key_call : key_calls) {
if (key_call.type == KeyCall::kKeyCallOnKey &&
key_call.key_event.character != nullptr) {
delete[] key_call.key_event.character;
}
}
key_calls.clear();
}
class KeyboardTester {
public:
using ResponseHandler =
std::function<void(MockKeyResponseController::ResponseCallback)>;
explicit KeyboardTester()
: callback_handler_(RespondValue(false)),
map_virtual_key_layout_(LayoutDefault) {
view_ = std::make_unique<TestFlutterWindowsView>(
[](const std::u16string& text) {
key_calls.push_back(KeyCall{
.type = KeyCall::kKeyCallOnText,
.text = text,
});
},
[this](int virtual_key) -> SHORT {
// `window_` is not initialized yet when this callback is first
// called.
return window_ ? window_->GetKeyState(virtual_key) : 0;
},
[this](UINT virtual_key, bool extended) -> SHORT {
return map_virtual_key_layout_(
virtual_key, extended ? MAPVK_VK_TO_VSC_EX : MAPVK_VK_TO_VSC);
});
view_->SetEngine(GetTestEngine(
[&callback_handler = callback_handler_](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
FlutterKeyEvent clone_event = *event;
clone_event.character = event->character == nullptr
? nullptr
: clone_string(event->character);
key_calls.push_back(KeyCall{
.type = KeyCall::kKeyCallOnKey,
.key_event = clone_event,
});
callback_handler(event, callback);
}));
window_ = std::make_unique<MockKeyboardManagerWin32Delegate>(
view_.get(), [this](UINT virtual_key) -> SHORT {
return map_virtual_key_layout_(virtual_key, MAPVK_VK_TO_CHAR);
});
}
TestFlutterWindowsView& GetView() { return *view_; }
MockKeyboardManagerWin32Delegate& GetWindow() { return *window_; }
// Set all events to be handled (true) or unhandled (false).
void Responding(bool response) { callback_handler_ = RespondValue(response); }
// Manually handle event callback of the onKeyData embedder API.
//
// On every onKeyData call, the |handler| will be invoked with the target
// key data and the result callback. Immediately calling the callback with
// a boolean is equivalent to setting |Responding| with the boolean. However,
// |LateResponding| allows storing the callback to call later.
void LateResponding(
MockKeyResponseController::EmbedderCallbackHandler handler) {
callback_handler_ = std::move(handler);
}
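// A sketch of deferred responses (illustrative): store the framework
// callbacks and resolve them later, e.g. to test event queueing.
//
//   std::vector<MockKeyResponseController::ResponseCallback> pending;
//   tester.LateResponding(
//       [&pending](const FlutterKeyEvent* event,
//                  MockKeyResponseController::ResponseCallback callback) {
//         pending.push_back(callback);
//       });
//   // ... later: pending.front()(false);  // reject the first event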
void SetLayout(MapVirtualKeyLayout layout) {
map_virtual_key_layout_ = layout == nullptr ? LayoutDefault : layout;
}
void InjectKeyboardChanges(std::vector<KeyboardChange> changes) {
assert(window_ != nullptr);
window_->InjectKeyboardChanges(changes);
}
// Get the number of redispatched messages since the last clear, then clear
// the counter.
size_t RedispatchedMessageCountAndClear() {
auto& messages = window_->RedispatchedMessages();
size_t count = messages.size();
messages.clear();
return count;
}
private:
std::unique_ptr<TestFlutterWindowsView> view_;
std::unique_ptr<MockKeyboardManagerWin32Delegate> window_;
MockKeyResponseController::EmbedderCallbackHandler callback_handler_;
MapVirtualKeyLayout map_virtual_key_layout_;
// Returns an engine instance configured with dummy project path values, and
// overridden methods for sending platform messages, so that the engine can
// respond as if the framework were connected.
static std::unique_ptr<FlutterWindowsEngine> GetTestEngine(
MockKeyResponseController::EmbedderCallbackHandler
embedder_callback_handler) {
FlutterDesktopEngineProperties properties = {};
properties.assets_path = L"C:\\foo\\flutter_assets";
properties.icu_data_path = L"C:\\foo\\icudtl.dat";
properties.aot_library_path = L"C:\\foo\\aot.so";
FlutterProjectBundle project(properties);
auto engine = std::make_unique<FlutterWindowsEngine>(project);
EngineModifier modifier(engine.get());
auto key_response_controller =
std::make_shared<MockKeyResponseController>();
key_response_controller->SetEmbedderResponse(
std::move(embedder_callback_handler));
key_response_controller->SetTextInputResponse(
[](std::unique_ptr<rapidjson::Document> document) {
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
document->Accept(writer);
key_calls.push_back(KeyCall{
.type = KeyCall::kKeyCallTextMethodCall,
.text_method_call = buffer.GetString(),
});
});
MockEmbedderApiForKeyboard(modifier, key_response_controller);
engine->RunWithEntrypoint(nullptr);
return engine;
}
static MockKeyResponseController::EmbedderCallbackHandler RespondValue(
bool value) {
return [value](const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
callback(value);
};
}
};
} // namespace
// Define compound `expect` in macros. If they're defined in functions, the
// stacktrace wouldn't print where the function is called in the unit tests.
#define EXPECT_CALL_IS_EVENT(_key_call, ...) \
EXPECT_EQ(_key_call.type, KeyCall::kKeyCallOnKey); \
EXPECT_EVENT_EQUALS(_key_call.key_event, __VA_ARGS__);
#define EXPECT_CALL_IS_TEXT(_key_call, u16_string) \
EXPECT_EQ(_key_call.type, KeyCall::kKeyCallOnText); \
EXPECT_EQ(_key_call.text, u16_string);
#define EXPECT_CALL_IS_TEXT_METHOD_CALL(_key_call, json_string) \
EXPECT_EQ(_key_call.type, KeyCall::kKeyCallTextMethodCall); \
EXPECT_STREQ(_key_call.text_method_call.c_str(), json_string);
TEST(KeyboardTest, LowerCaseAHandled) {
KeyboardTester tester;
tester.Responding(true);
// US Keyboard layout
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(<|fim▁hole|> EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, LowerCaseAUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, ArrowLeftHandled) {
KeyboardTester tester;
tester.Responding(true);
// US Keyboard layout
// Press ArrowLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{VK_LEFT, kScanCodeArrowLeft, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalArrowLeft, kLogicalArrowLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release ArrowLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{VK_LEFT, kScanCodeArrowLeft, kExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalArrowLeft,
kLogicalArrowLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, ArrowLeftUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ArrowLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{VK_LEFT, kScanCodeArrowLeft, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalArrowLeft, kLogicalArrowLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ArrowLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{VK_LEFT, kScanCodeArrowLeft, kExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalArrowLeft,
kLogicalArrowLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, ShiftLeftUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, true, false},
WmKeyDownInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftLeft, kLogicalShiftLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, false, true},
WmKeyUpInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalShiftLeft,
kLogicalShiftLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, ShiftRightUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ShiftRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RSHIFT, true, false},
WmKeyDownInfo{VK_SHIFT, kScanCodeShiftRight, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftRight, kLogicalShiftRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ShiftRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RSHIFT, false, true},
WmKeyUpInfo{VK_SHIFT, kScanCodeShiftRight, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalShiftRight, kLogicalShiftRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, CtrlLeftUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press CtrlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, false},
WmKeyDownInfo{VK_CONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release CtrlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true},
WmKeyUpInfo{VK_SHIFT, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, CtrlRightUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press CtrlRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RCONTROL, true, false},
WmKeyDownInfo{VK_CONTROL, kScanCodeControl, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlRight, kLogicalControlRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release CtrlRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RCONTROL, false, true},
WmKeyUpInfo{VK_CONTROL, kScanCodeControl, kExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlRight, kLogicalControlRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, AltLeftUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press AltLeft. AltLeft is a SysKeyDown event.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LMENU, true, false},
WmSysKeyDownInfo{VK_MENU, kScanCodeAlt, kNotExtended, kWasUp}.Build(
kWmResultDefault)}); // Always pass to the default WndProc.
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalAltLeft,
kLogicalAltLeft, "", kNotSynthesized);
clear_key_calls();
// Don't redispatch sys messages.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release AltLeft. AltLeft is a SysKeyUp event.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LMENU, false, true},
WmSysKeyUpInfo{VK_MENU, kScanCodeAlt, kNotExtended}.Build(
kWmResultDefault)}); // Always pass to the default WndProc.
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalAltLeft,
kLogicalAltLeft, "", kNotSynthesized);
clear_key_calls();
// Don't redispatch sys messages.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, AltRightUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press AltRight. AltRight is a SysKeyDown event.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RMENU, true, false},
WmSysKeyDownInfo{VK_MENU, kScanCodeAlt, kExtended, kWasUp}.Build(
kWmResultDefault)}); // Always pass to the default WndProc.
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalAltRight, kLogicalAltRight, "",
kNotSynthesized);
clear_key_calls();
// Don't redispatch sys messages.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release AltRight. AltRight is a SysKeyUp event.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RMENU, false, true},
WmSysKeyUpInfo{VK_MENU, kScanCodeAlt, kExtended}.Build(
kWmResultDefault)}); // Always pass to the default WndProc.
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalAltRight,
kLogicalAltRight, "", kNotSynthesized);
clear_key_calls();
// Don't redispatch sys messages.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, MetaLeftUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press MetaLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LWIN, true, false},
WmKeyDownInfo{VK_LWIN, kScanCodeMetaLeft, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalMetaLeft, kLogicalMetaLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release MetaLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LWIN, false, true},
WmKeyUpInfo{VK_LWIN, kScanCodeMetaLeft, kExtended}.Build(kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalMetaLeft,
kLogicalMetaLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
TEST(KeyboardTest, MetaRightUnhandled) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press MetaRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RWIN, true, false},
WmKeyDownInfo{VK_RWIN, kScanCodeMetaRight, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalMetaRight, kLogicalMetaRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release MetaRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RWIN, false, true},
WmKeyUpInfo{VK_RWIN, kScanCodeMetaRight, kExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalMetaRight,
kLogicalMetaRight, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Press Shift-A. This is special because Win32 gives 'A' as character for the
// KeyA press.
TEST(KeyboardTest, ShiftLeftKeyA) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, true, true},
WmKeyDownInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftLeft, kLogicalShiftLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'A', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "A", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"A");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, false, true},
WmKeyUpInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalShiftLeft,
kLogicalShiftLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Press Ctrl-A. This is special because Win32 gives 0x01 as character for the
// KeyA press.
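//
// The 0x01 control character should not be forwarded as text, nor attached
// to the key event; only the key down itself is expected.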
TEST(KeyboardTest, CtrlLeftKeyA) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ControlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, true},
WmKeyDownInfo{VK_CONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0x01, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ControlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true},
WmKeyUpInfo{VK_CONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Press Ctrl-1. This is special because it yields no WM_CHAR for the 1.
TEST(KeyboardTest, CtrlLeftDigit1) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press ControlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, true},
WmKeyDownInfo{VK_CONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended, kWasUp}
.Build(kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalDigit1,
kLogicalDigit1, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalDigit1,
kLogicalDigit1, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ControlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true},
WmKeyUpInfo{VK_CONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Press 1 on a French keyboard. This is special because it yields WM_CHAR
// with char_code '&'.
TEST(KeyboardTest, Digit1OnFrenchLayout) {
KeyboardTester tester;
tester.Responding(false);
tester.SetLayout(LayoutFrench);
// Press 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended, kWasUp}
.Build(kWmResultZero),
WmCharInfo{'&', kScanCodeDigit1, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalDigit1,
kLogicalDigit1, "&", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"&");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalDigit1,
kLogicalDigit1, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// This tests AltGr-Q on a German keyboard, which should print '@'.
TEST(KeyboardTest, AltGrModifiedKey) {
KeyboardTester tester;
tester.Responding(false);
// German Keyboard layout
  // Press AltGr, which Win32 precedes with a ControlLeft down.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, true},
WmKeyDownInfo{VK_LCONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero),
KeyStateChange{VK_RMENU, true, true},
WmKeyDownInfo{VK_MENU, kScanCodeAlt, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeDown,
kPhysicalAltRight, kLogicalAltRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Press Q
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyQ, kScanCodeKeyQ, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'@', kScanCodeKeyQ, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyQ,
kLogicalKeyQ, "@", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"@");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release Q
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyQ, kScanCodeKeyQ, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyQ,
kLogicalKeyQ, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release AltGr. Win32 doesn't dispatch ControlLeft up. Instead Flutter will
// forge one. The AltGr is a system key, therefore will be handled by Win32's
// default WndProc.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true},
ExpectForgedMessage{
WmKeyUpInfo{VK_LCONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)},
KeyStateChange{VK_RMENU, false, true},
WmSysKeyUpInfo{VK_MENU, kScanCodeAlt, kExtended}.Build(
kWmResultDefault)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalAltRight,
kLogicalAltRight, "", kNotSynthesized);
clear_key_calls();
// The sys key up must not be redispatched. The forged ControlLeft up will.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Test the following two key sequences at the same time:
//
// 1. Tap AltGr, then tap AltGr.
// 2. Tap AltGr, hold CtrlLeft, tap AltGr, release CtrlLeft.
//
// The two sequences are indistinguishable until the very end when a CtrlLeft
// up event might or might not follow.
//
// Sequence 1: CtrlLeft down, AltRight down, AltRight up
// Sequence 2: CtrlLeft down, AltRight down, AltRight up, CtrlLeft up
//
// This is because pressing AltGr alone causes Win32 to send a fake "CtrlLeft
// down" event first (see |IsKeyDownAltRight| for detailed explanation).
TEST(KeyboardTest, AltGrTwice) {
KeyboardTester tester;
tester.Responding(false);
// 1. AltGr down.
  // The key down event causes a ControlLeft down and an AltRight (extended
  // AltLeft) down.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, true},
WmKeyDownInfo{VK_LCONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero),
KeyStateChange{VK_RMENU, true, true},
WmKeyDownInfo{VK_MENU, kScanCodeAlt, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeDown,
kPhysicalAltRight, kLogicalAltRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// 2. AltGr up.
  // The key up event causes only an AltRight (extended AltLeft) up.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true},
ExpectForgedMessage{
WmKeyUpInfo{VK_LCONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)},
KeyStateChange{VK_RMENU, false, true},
WmSysKeyUpInfo{VK_MENU, kScanCodeAlt, kExtended}.Build(
kWmResultDefault)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalAltRight,
kLogicalAltRight, "", kNotSynthesized);
clear_key_calls();
// The sys key up must not be redispatched. The forged ControlLeft up will.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// 3. AltGr down (or: ControlLeft down then AltRight down.)
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, false},
WmKeyDownInfo{VK_LCONTROL, kScanCodeControl, kNotExtended, kWasUp}.Build(
kWmResultZero),
KeyStateChange{VK_RMENU, true, true},
WmKeyDownInfo{VK_MENU, kScanCodeAlt, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeDown,
kPhysicalAltRight, kLogicalAltRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// 4. AltGr up.
  // The key up event causes only an AltRight (extended AltLeft) up.
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, false},
ExpectForgedMessage{
WmKeyUpInfo{VK_LCONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)},
KeyStateChange{VK_RMENU, false, false},
WmSysKeyUpInfo{VK_MENU, kScanCodeAlt, kExtended}.Build(
kWmResultDefault)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalAltRight,
kLogicalAltRight, "", kNotSynthesized);
clear_key_calls();
// The sys key up must not be redispatched. The forged ControlLeft up will.
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// 5. For key sequence 2: a real ControlLeft up.
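  // A forged CtrlLeft up was already sent in step 4, so this real up has no
  // matching pressed state; the embedder presumably reports it as an empty
  // (all-zero) event so that every native message still gets a response, and
  // nothing is redispatched.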
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{VK_LCONTROL, kScanCodeControl, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, 0, 0, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
// This tests dead key ^ then E on a French keyboard, which should be combined
// into ê.
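//
// A WM_DEADCHAR message should produce no onText call; the dead-key
// character only decorates the key down event, and the composed text
// arrives with the following letter's WM_CHAR.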
TEST(KeyboardTest, DeadKeyThatCombines) {
KeyboardTester tester;
tester.Responding(false);
tester.SetLayout(LayoutFrench);
// Press ^¨ (US: Left bracket)
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{0xDD, kScanCodeBracketLeft, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmDeadCharInfo{'^', kScanCodeBracketLeft, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBracketLeft, kLogicalBracketRight, "^",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release ^¨
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{0xDD, kScanCodeBracketLeft, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalBracketLeft, kLogicalBracketRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press E
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyE, kScanCodeKeyE, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0xEA, kScanCodeKeyE, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyE,
kLogicalKeyE, "ê", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"ê");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release E
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyE, kScanCodeKeyE, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyE,
kLogicalKeyE, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// This tests dead key ^ then E on a US INTL keyboard, which should be combined
// into ê.
//
// It differs from French AZERTY in that, for some reason, the character the
// ^ key maps to does not carry the dead-key character.
TEST(KeyboardTest, DeadKeyWithoutDeadMaskThatCombines) {
KeyboardTester tester;
tester.Responding(false);
// Press ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, true, true},
WmKeyDownInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftLeft, kLogicalShiftLeft, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press 6^
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{'6', kScanCodeDigit6, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmDeadCharInfo{'^', kScanCodeDigit6, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalDigit6,
kLogicalDigit6, "6", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release 6^
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{'6', kScanCodeDigit6, kNotExtended}.Build(kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalDigit6,
kLogicalDigit6, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Release ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, false, true},
WmKeyUpInfo{VK_SHIFT, kScanCodeShiftLeft, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalShiftLeft,
kLogicalShiftLeft, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press E
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyE, kScanCodeKeyE, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0xEA, kScanCodeKeyE, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyE,
kLogicalKeyE, "ê", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"ê");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release E
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyE, kScanCodeKeyE, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyE,
kLogicalKeyE, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// This tests dead key ^ then & (US: 1) on a French keyboard, which do not
// combine and should output "^&".
TEST(KeyboardTest, DeadKeyThatDoesNotCombine) {
KeyboardTester tester;
tester.Responding(false);
tester.SetLayout(LayoutFrench);
// Press ^¨ (US: Left bracket)
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{0xDD, kScanCodeBracketLeft, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmDeadCharInfo{'^', kScanCodeBracketLeft, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBracketLeft, kLogicalBracketRight, "^",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release ^¨
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{0xDD, kScanCodeBracketLeft, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalBracketLeft, kLogicalBracketRight, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended, kWasUp}
.Build(kWmResultZero),
WmCharInfo{'^', kScanCodeDigit1, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'&', kScanCodeDigit1, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 3);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalDigit1,
kLogicalDigit1, "^", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"^");
EXPECT_CALL_IS_TEXT(key_calls[2], u"&");
clear_key_calls();
// TODO(dkwingsmt): This count should probably be 3. Currently the '^'
// message is redispatched due to being part of the KeyDown session, which is
// not handled by the framework, while the '&' message is not redispatched
// for being a standalone message. We should resolve this inconsistency.
// https://github.com/flutter/flutter/issues/98306
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release 1
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualDigit1, kScanCodeDigit1, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalDigit1,
kLogicalDigit1, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// This tests dead key `, then dead key `, then e.
//
// It should output ``e, instead of `è.
TEST(KeyboardTest, DeadKeyTwiceThenLetter) {
KeyboardTester tester;
tester.Responding(false);
// US INTL layout.
// Press `
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{0xC0, kScanCodeBackquote, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmDeadCharInfo{'`', kScanCodeBackquote, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBackquote, kLogicalBackquote, "`",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release `
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{0xC0, kScanCodeBackquote, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalBackquote,
kLogicalBackquote, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press ` again.
// The response should be slow.
std::vector<MockKeyResponseController::ResponseCallback> recorded_callbacks;
tester.LateResponding(
[&recorded_callbacks](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
recorded_callbacks.push_back(callback);
});
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{0xC0, kScanCodeBackquote, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'`', kScanCodeBackquote, kNotExtended, kWasUp, kBeingReleased,
kNoContext, 1, /*bit25*/ true}
.Build(kWmResultZero),
WmCharInfo{'`', kScanCodeBackquote, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(recorded_callbacks.size(), 1);
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBackquote, kLogicalBackquote, "`",
kNotSynthesized);
clear_key_calls();
// Key down event responded with false.
recorded_callbacks.front()(false);
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_TEXT(key_calls[0], u"`");
EXPECT_CALL_IS_TEXT(key_calls[1], u"`");
clear_key_calls();
// TODO(dkwingsmt): This count should probably be 3. See the comment above
// that is marked with the same issue.
// https://github.com/flutter/flutter/issues/98306
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
tester.Responding(false);
// Release `
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{0xC0, kScanCodeBackquote, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalBackquote,
kLogicalBackquote, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// This tests the case where the resulting character must be encoded as a
// surrogate pair.
TEST(KeyboardTest, MultibyteCharacter) {
KeyboardTester tester;
tester.Responding(false);
// Gothic Keyboard layout. (We need a layout that yields non-BMP characters
// without IME, which is actually very rare.)
  // Press the key at the US-keyboard W position, which should yield the
  // character '𐍅'.
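  // 0xD800 0xDF45 is the UTF-16 surrogate pair encoding U+10345 ('𐍅').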
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyW, kScanCodeKeyW, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0xd800, kScanCodeKeyW, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0xdf45, kScanCodeKeyW, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyW,
kLogicalKeyW, "𐍅", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"𐍅");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 3);
// Release W
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyW, kScanCodeKeyW, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyW,
kLogicalKeyW, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
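// When a KeyStateChange disagrees with the engine's last known modifier or
// lock state, the engine is expected to synthesize the missing down/up
// events (marked kSynthesized) before handling the accompanying message.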
TEST(KeyboardTest, SynthesizeModifiers) {
KeyboardTester tester;
tester.Responding(false);
// Two dummy events used to trigger synthesization.
Win32Message event1 =
WmKeyDownInfo{VK_BACK, kScanCodeBackspace, kNotExtended, kWasUp}.Build(
kWmResultZero);
Win32Message event2 =
WmKeyUpInfo{VK_BACK, kScanCodeBackspace, kNotExtended}.Build(
kWmResultZero);
// ShiftLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftLeft, kLogicalShiftLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LSHIFT, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalShiftLeft,
kLogicalShiftLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// ShiftRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RSHIFT, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalShiftRight, kLogicalShiftRight, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RSHIFT, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalShiftRight, kLogicalShiftRight, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// ControlLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlLeft, kLogicalControlLeft, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LCONTROL, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlLeft, kLogicalControlLeft, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// ControlRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RCONTROL, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalControlRight, kLogicalControlRight, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RCONTROL, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalControlRight, kLogicalControlRight, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// AltLeft
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LMENU, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalAltLeft,
kLogicalAltLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LMENU, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalAltLeft,
kLogicalAltLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// AltRight
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RMENU, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalAltRight, kLogicalAltRight, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RMENU, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalAltRight,
kLogicalAltRight, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// MetaLeft
tester.InjectKeyboardChanges(
std::vector<KeyboardChange>{KeyStateChange{VK_LWIN, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalMetaLeft, kLogicalMetaLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_LWIN, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalMetaLeft,
kLogicalMetaLeft, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// MetaRight
tester.InjectKeyboardChanges(
std::vector<KeyboardChange>{KeyStateChange{VK_RWIN, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalMetaRight, kLogicalMetaRight, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RWIN, false, true}, event2});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalMetaRight,
kLogicalMetaRight, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// CapsLock, phase 0 -> 2 -> 0.
// (For phases, see |SynchronizeCritialToggledStates|.)
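  // (The phase presumably encodes the (pressed, toggled) pair: 0 = released
  // & untoggled, 1 = pressed & toggled, 2 = released & toggled, 3 = pressed
  // & untoggled.)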
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_CAPITAL, false, true}, event1});
EXPECT_EQ(key_calls.size(), 3);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalCapsLock, kLogicalCapsLock, "", kSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalCapsLock,
kLogicalCapsLock, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_CAPITAL, false, false}, event2});
EXPECT_EQ(key_calls.size(), 3);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalCapsLock, kLogicalCapsLock, "", kSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalCapsLock,
kLogicalCapsLock, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// ScrollLock, phase 0 -> 1 -> 3
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_SCROLL, true, true}, event1});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalScrollLock, kLogicalScrollLock, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_SCROLL, true, false}, event2});
EXPECT_EQ(key_calls.size(), 3);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp,
kPhysicalScrollLock, kLogicalScrollLock, "",
kSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeDown,
kPhysicalScrollLock, kLogicalScrollLock, "",
kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// NumLock, phase 0 -> 3 -> 2
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_NUMLOCK, true, false}, event1});
// TODO(dkwingsmt): Synthesizing from phase 0 to 3 should yield a full key
// tap and a key down. Fix the algorithm so that the following result becomes
// 4 keycalls with an extra pair of key down and up.
// https://github.com/flutter/flutter/issues/98533
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalNumLock,
kLogicalNumLock, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_NUMLOCK, false, true}, event2});
EXPECT_EQ(key_calls.size(), 4);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalNumLock,
kLogicalNumLock, "", kSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeDown, kPhysicalNumLock,
kLogicalNumLock, "", kSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[2], kFlutterKeyEventTypeUp, kPhysicalNumLock,
kLogicalNumLock, "", kSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
}
// Pressing extended keys during IME composition should be handled properly
// by not sending any key events.
//
// Regression test for https://github.com/flutter/flutter/issues/95888 .
TEST(KeyboardTest, ImeExtendedEventsAreIgnored) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout.
  // There should be preceding key events to put the keyboard into IME mode.
  // They are omitted in this test since they are not relevant.
// Press CtrlRight in IME mode.
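  // VK_PROCESSKEY indicates that the IME has consumed the key; the embedder
  // is expected to emit an empty (all-zero) event and skip redispatching.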
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
KeyStateChange{VK_RCONTROL, true, false},
WmKeyDownInfo{VK_PROCESSKEY, kScanCodeControl, kExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, 0, 0, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, DisorderlyRespondedEvents) {
KeyboardTester tester;
// Store callbacks to manually call them.
std::vector<MockKeyResponseController::ResponseCallback> recorded_callbacks;
tester.LateResponding(
[&recorded_callbacks](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
recorded_callbacks.push_back(callback);
});
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
// Press B
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyB, kScanCodeKeyB, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'b', kScanCodeKeyB, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_EQ(recorded_callbacks.size(), 2);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Resolve the second event first to test disordered responses.
recorded_callbacks.back()(false);
EXPECT_EQ(key_calls.size(), 0);
clear_key_calls();
// TODO(dkwingsmt): This should probably be 0. Redispatching the messages of
// the second event this early means that the messages are not redispatched
// in the order of arrival. https://github.com/flutter/flutter/issues/98308
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Resolve the first event.
recorded_callbacks.front()(false);
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_TEXT(key_calls[0], u"a");
EXPECT_CALL_IS_TEXT(key_calls[1], u"b");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
}
// Regression test for a crash in an earlier implementation.
//
// In real life, the framework responds slowly. The next real event might
// arrive earlier than the framework response, and if the 2nd event has an
// identical hash to the one waiting for a response, an earlier
// implementation would crash upon the response.
TEST(KeyboardTest, SlowFrameworkResponse) {
KeyboardTester tester;
std::vector<MockKeyResponseController::ResponseCallback> recorded_callbacks;
// Store callbacks to manually call them.
tester.LateResponding(
[&recorded_callbacks](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
recorded_callbacks.push_back(callback);
});
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
// Hold A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasDown}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasDown}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_EQ(recorded_callbacks.size(), 2);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// The first response.
recorded_callbacks.front()(false);
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_TEXT(key_calls[0], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// The second response.
recorded_callbacks.back()(false);
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_TEXT(key_calls[0], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
}
// Regression test for https://github.com/flutter/flutter/issues/84210.
//
// When the framework response is slow during a sequence of identical messages,
// make sure the real messages are not mistaken as redispatched messages,
// in order to not mess up the order of events.
//
// In this test we use:
//
// KeyA down, KeyA up, (down event responded with false), KeyA down, KeyA up,
//
// The code must not take the 2nd real key down event as a redispatched
// event.
TEST(KeyboardTest, SlowFrameworkResponseForIdenticalEvents) {
KeyboardTester tester;
std::vector<MockKeyResponseController::ResponseCallback> recorded_callbacks;
// Store callbacks to manually call them.
tester.LateResponding(
[&recorded_callbacks](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
recorded_callbacks.push_back(callback);
});
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// The first down event responded with false.
EXPECT_EQ(recorded_callbacks.size(), 2);
recorded_callbacks.front()(false);
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_TEXT(key_calls[0], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Press A again
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
// Release A again
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, TextInputSubmit) {
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
tester.GetView().HandleMessage(
"flutter/textinput", "TextInput.setClient",
R"|([108, {"inputAction": "TextInputAction.none"}])|");
// Press Enter
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{VK_RETURN, kScanCodeEnter, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'\n', kScanCodeEnter, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalEnter,
kLogicalEnter, "", kNotSynthesized);
EXPECT_CALL_IS_TEXT_METHOD_CALL(
key_calls[1],
"{"
R"|("method":"TextInputClient.performAction",)|"
R"|("args":[108,"TextInputAction.none"])|"
"}");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release Enter
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{VK_RETURN, kScanCodeEnter, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalEnter,
kLogicalEnter, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Make sure OnText is not obstructed after pressing Enter.
//
// Regression test for https://github.com/flutter/flutter/issues/97706.
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"a");
clear_key_calls();
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
}
TEST(KeyboardTest, VietnameseTelexAddDiacriticWithFastResponse) {
  // In this test, the user presses the following keys:
//
// Key Current text
// ===========================
// A a
// F à
//
  // And the Backspace event is responded to immediately.
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
// Press F, which is translated to:
//
// Backspace down, char & up, then VK_PACKET('à').
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{VK_BACK, kScanCodeBackspace, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0x8, kScanCodeBackspace, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmKeyUpInfo{VK_BACK, kScanCodeBackspace, kNotExtended}.Build(
kWmResultZero),
WmKeyDownInfo{VK_PACKET, 0, kNotExtended, kWasUp}.Build(kWmResultDefault),
WmCharInfo{0xe0 /*'à'*/, 0, kNotExtended, kWasUp}.Build(kWmResultZero),
WmKeyUpInfo{VK_PACKET, 0, kNotExtended}.Build(kWmResultDefault)});
EXPECT_EQ(key_calls.size(), 3);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBackspace, kLogicalBackspace, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalBackspace,
kLogicalBackspace, "", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[2], u"à");
clear_key_calls();
// TODO(dkwingsmt): This count should probably be 4. Currently the CHAR 0x8
// message is redispatched due to being part of the KeyDown session, which is
// not handled by the framework, while the 'à' message is not redispatched
// for being a standalone message. We should resolve this inconsistency.
// https://github.com/flutter/flutter/issues/98306
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 3);
// Release F
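  // The engine never saw an F down (the IME translated it into the messages
  // above), so this up has no matching press and surfaces as an empty
  // (all-zero) event with nothing redispatched.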
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyF, kScanCodeKeyF, kNotExtended,
/* overwrite_prev_state_0 */ true}
.Build(kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, 0, 0, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
void VietnameseTelexAddDiacriticWithSlowResponse(bool backspace_response) {
  // In this test, the user presses the following keys:
//
// Key Current text
// ===========================
// A a
// F à
//
  // And the Backspace down event is responded to slowly with
  // `backspace_response`.
KeyboardTester tester;
tester.Responding(false);
// US Keyboard layout
// Press A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{'a', kScanCodeKeyA, kNotExtended, kWasUp}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, kPhysicalKeyA,
kLogicalKeyA, "a", kNotSynthesized);
EXPECT_CALL_IS_TEXT(key_calls[1], u"a");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 2);
// Release A
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyA, kScanCodeKeyA, kNotExtended}.Build(
kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeUp, kPhysicalKeyA,
kLogicalKeyA, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
std::vector<MockKeyResponseController::ResponseCallback> recorded_callbacks;
tester.LateResponding(
[&recorded_callbacks](
const FlutterKeyEvent* event,
MockKeyResponseController::ResponseCallback callback) {
recorded_callbacks.push_back(callback);
});
// Press F, which is translated to:
//
// Backspace down, char & up, VK_PACKET('à').
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyDownInfo{VK_BACK, kScanCodeBackspace, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmCharInfo{0x8, kScanCodeBackspace, kNotExtended, kWasUp}.Build(
kWmResultZero),
WmKeyUpInfo{VK_BACK, kScanCodeBackspace, kNotExtended}.Build(
kWmResultZero),
WmKeyDownInfo{VK_PACKET, 0, kNotExtended, kWasUp}.Build(kWmResultDefault),
WmCharInfo{0xe0 /*'à'*/, 0, kNotExtended, kWasUp}.Build(kWmResultZero),
WmKeyUpInfo{VK_PACKET, 0, kNotExtended}.Build(kWmResultDefault)});
  // The Backspace event has not been responded to yet, so the char message
  // must be withheld. This is because when the framework handles the
  // Backspace event, it will send a setEditingState message that updates the
  // text state to one with the last character deleted (denoted by `string1`).
  // Processing the char message before then would cause the final text to be
  // set to `string1`.
EXPECT_EQ(key_calls.size(), 2);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown,
kPhysicalBackspace, kLogicalBackspace, "",
kNotSynthesized);
EXPECT_CALL_IS_EVENT(key_calls[1], kFlutterKeyEventTypeUp, kPhysicalBackspace,
kLogicalBackspace, "", kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
EXPECT_EQ(recorded_callbacks.size(), 2);
recorded_callbacks[0](backspace_response);
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_TEXT(key_calls[0], u"à");
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(),
backspace_response ? 0 : 2);
recorded_callbacks[1](false);
EXPECT_EQ(key_calls.size(), 0);
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 1);
tester.Responding(false);
// Release F
tester.InjectKeyboardChanges(std::vector<KeyboardChange>{
WmKeyUpInfo{kVirtualKeyF, kScanCodeKeyF, kNotExtended,
/* overwrite_prev_state_0 */ true}
.Build(kWmResultZero)});
EXPECT_EQ(key_calls.size(), 1);
EXPECT_CALL_IS_EVENT(key_calls[0], kFlutterKeyEventTypeDown, 0, 0, "",
kNotSynthesized);
clear_key_calls();
EXPECT_EQ(tester.RedispatchedMessageCountAndClear(), 0);
}
TEST(KeyboardTest, VietnameseTelexAddDiacriticWithSlowFalseResponse) {
VietnameseTelexAddDiacriticWithSlowResponse(false);
}
TEST(KeyboardTest, VietnameseTelexAddDiacriticWithSlowTrueResponse) {
VietnameseTelexAddDiacriticWithSlowResponse(true);
}
} // namespace testing
} // namespace flutter
// File: Tab1.tsx

import * as React from 'react';
import {ContentBlock} from 'framework7-react';
export const Tab1 = () => {
return (
<ContentBlock>
<p>Tab 1</p>
<p><a href="/nested-routes/tabs/tab-2/">Go to tab 2</a></p>
<p><a href="/nested-routes/tabs/tab-3/">Go to tab 3</a></p>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Iure odio dolor, soluta reprehenderit, corporis officia earum corrupti amet ea aspernatur praesentium, distinctio ipsa atque officiis. Recusandae ab error, atque natus.</p>
<p>Quibusdam repellendus, repudiandae nulla commodi ut nemo eum, quia dolorum dicta voluptate reprehenderit. Omnis, temporibus iure quia ad sit incidunt similique et quaerat dicta delectus aspernatur, unde illo cum ipsam?</p>
<p>Officia alias inventore soluta illo, omnis tempore obcaecati at, harum impedit nemo enim, iure fugit est explicabo commodi ipsam distinctio architecto voluptatum reprehenderit, aperiam dolorem praesentium repellendus repudiandae ducimus saepe.</p>
<p>Autem ullam tempore suscipit consectetur, odit soluta. Neque quasi mollitia culpa autem, quis ab nostrum non distinctio illo magni perspiciatis et consequuntur possimus aliquam reprehenderit reiciendis praesentium consectetur, voluptas obcaecati?</p>
<p>Quae repellendus aut iste provident consectetur inventore voluptatibus, saepe ad dolorem voluptatum recusandae praesentium animi harum officiis error nulla ullam nobis? Mollitia possimus voluptatum pariatur omnis quibusdam, quidem recusandae velit.</p>
    </ContentBlock>
  );
};
// File: apply_crd_test.go

/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"context"
"encoding/json"
"fmt"
"path"
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
featuregatetesting "k8s.io/component-base/featuregate/testing"
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/test/integration/framework"
)
// TestApplyCRDStructuralSchema tests that when a CRD has a structural schema in its validation field,
// it will be used to construct the CR schema used by apply.
func TestApplyCRDStructuralSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewMultipleVersionNoxuCRD(apiextensionsv1.ClusterScoped)
var c apiextensionsv1.CustomResourceValidation
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"type": "object",
"properties": {
"spec": {
"type": "object",
"x-kubernetes-preserve-unknown-fields": true,
"properties": {
"cronSpec": {
"type": "string",
"pattern": "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$"
},
"ports": {
"type": "array",
"x-kubernetes-list-map-keys": [
"containerPort",
"protocol"
],
"x-kubernetes-list-type": "map",
"items": {
"properties": {
"containerPort": {
"format": "int32",
"type": "integer"
},
"hostIP": {
"type": "string"
},
"hostPort": {
"format": "int32",
"type": "integer"
},
"name": {
"type": "string"
},
"protocol": {
"type": "string"
}
},
"required": [
"containerPort",
"protocol"
],
"type": "object"
}
}
}
}
}
}
}`), &c)
if err != nil {
t.Fatal(err)
}
for i := range noxuDefinition.Spec.Versions {
noxuDefinition.Spec.Versions[i].Schema = &c
}
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
finalizers:
- test-finalizer
spec:
cronSpec: "* * * * */5"
replicas: 1
ports:
- name: x
containerPort: 80
protocol: TCP`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 1)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyReplicas(t, result, 1)
verifyNumPorts(t, result, 1)
// Patch object to add another finalizer to the finalizers list
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"metadata":{"finalizers":["test-finalizer","another-one"]}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to add finalizer with merge patch: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Re-apply the same config, should work fine, since finalizers should have the list-type extension 'set'.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
SetHeader("Accept", "application/json").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply same config after adding a finalizer: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// New applier tries to edit an existing list item, we should get conflicts.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 80
protocol: TCP`, apiVersion, kind, name))).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
}
status, ok = err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when a different applier updates existing list item, got: %v", status.Status().Details.Causes)
}
// New applier tries to add a new list item, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 8080
protocol: TCP`, apiVersion, kind, name))).
SetHeader("Accept", "application/json").
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to add a new list item to the object as a different applier: %v:\n%v", err, string(result))
}
verifyNumPorts(t, result, 2)
// UpdateOnCreate
notExistingYAMLBody := []byte(fmt.Sprintf(`
{
"apiVersion": "%s",
"kind": "%s",
"metadata": {
"name": "%s",
"finalizers": [
"test-finalizer"
]
},
"spec": {
"cronSpec": "* * * * */5",
"replicas": 1,
"ports": [
{
"name": "x",
"containerPort": 80
}
]
},
"protocol": "TCP"
}`, apiVersion, kind, "should-not-exist"))
_, err = rest.Put().
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name("should-not-exist").
Param("fieldManager", "apply_test").
Body(notExistingYAMLBody).
DoRaw(context.TODO())
if !apierrors.IsNotFound(err) {
t.Fatalf("create on update should fail with notFound, got %v", err)
}
}
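// The sequence above exercises the core server-side-apply contract: re-apply
// by the same manager succeeds while its fields are untouched, conflicts once
// another writer changes them, and succeeds again with force=true, which takes
// ownership back.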
// verifyNumFinalizers checks that len(.metadata.finalizers) == n
func verifyNumFinalizers(t *testing.T, b []byte, n int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to unmarshal response: %v", err)
}
if actual, expected := len(obj.GetFinalizers()), n; actual != expected {
t.Fatalf("expected %v finalizers but got %v:\n%v", expected, actual, string(b))
}
}
// verifyFinalizersIncludes checks that .metadata.finalizers includes e
func verifyFinalizersIncludes(t *testing.T, b []byte, e string) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to unmarshal response: %v", err)
}
for _, a := range obj.GetFinalizers() {
if a == e {
return
}
}
t.Fatalf("expected finalizers to include %q but got: %v", e, obj.GetFinalizers())
}
// verifyReplicas checks that .spec.replicas == r
func verifyReplicas(t *testing.T, b []byte, r int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to find replicas number in response: %v:\n%v", err, string(b))
}
spec, ok := obj.Object["spec"]
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
specMap, ok := spec.(map[string]interface{})
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
replicas, ok := specMap["replicas"]
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
replicasNumber, ok := replicas.(int64)
if !ok {
t.Fatalf("failed to find replicas number in response: expected int64 but got: %v", reflect.TypeOf(replicas))
}
if actual, expected := replicasNumber, int64(r); actual != expected {
t.Fatalf("expected %v ports but got %v:\n%v", expected, actual, string(b))
}
}
// verifyNumPorts checks that len(.spec.ports) == n
func verifyNumPorts(t *testing.T, b []byte, n int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to find ports list in response: %v:\n%v", err, string(b))
}
spec, ok := obj.Object["spec"]
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
specMap, ok := spec.(map[string]interface{})
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
ports, ok := specMap["ports"]
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
portsList, ok := ports.([]interface{})
if !ok {
t.Fatalf("failed to find ports list in response: expected array but got: %v", reflect.TypeOf(ports))
}
if actual, expected := len(portsList), n; actual != expected {
t.Fatalf("expected %v ports but got %v:\n%v", expected, actual, string(b))
}
}
func findCRDCondition(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) *apiextensionsv1.CustomResourceDefinitionCondition {
for i := range crd.Status.Conditions {
if crd.Status.Conditions[i].Type == conditionType {
return &crd.Status.Conditions[i]
}
}
return nil
}
// TestApplyCRDUnhandledSchema tests that when a CRD has a schema that kube-openapi ToProtoModels cannot handle correctly,
// apply falls back to non-schema behavior
func TestApplyCRDUnhandledSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
storageConfig := framework.SharedEtcd()
tlsInfo := transport.TLSInfo{
CertFile: storageConfig.Transport.CertFile,
KeyFile: storageConfig.Transport.KeyFile,
TrustedCAFile: storageConfig.Transport.TrustedCAFile,
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
t.Fatal(err)
}
etcdConfig := clientv3.Config{
Endpoints: storageConfig.Transport.ServerList,
DialTimeout: 20 * time.Second,
DialOptions: []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
},
TLS: tlsConfig,
}
etcdclient, err := clientv3.New(etcdConfig)
if err != nil {
t.Fatal(err)
}
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, storageConfig)
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
// this has to be v1beta1, so we can have an item with validation that does not match. v1 validation prevents this.
noxuBetaDefinition := &apiextensionsv1beta1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "CustomResourceDefinition",
APIVersion: "apiextensions.k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: "mygroup.example.com",
Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{{
Name: "v1beta1",
Served: true,
Storage: true,
}},
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: "noxus",
Singular: "nonenglishnoxu",
Kind: "WishIHadChosenNoxu",
ShortNames: []string{"foo", "bar", "abc", "def"},
ListKind: "NoxuItemList",
Categories: []string{"all"},
},
Scope: apiextensionsv1beta1.ClusterScoped,
},
}
// This is a schema that kube-openapi ToProtoModels does not handle correctly.
// https://github.com/kubernetes/kubernetes/blob/38752f7f99869ed65fb44378360a517649dc2f83/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go#L184
var c apiextensionsv1beta1.CustomResourceValidation
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"properties": {
"TypeFooBar": {
"type": "array"
}
}
}
}`), &c)
if err != nil {
t.Fatal(err)
}
noxuBetaDefinition.Spec.Validation = &c
betaBytes, err := json.Marshal(noxuBetaDefinition)
if err != nil {
t.Fatal(err)
}
t.Log(string(betaBytes))
ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceNone)
key := path.Join("/", storageConfig.Prefix, "apiextensions.k8s.io", "customresourcedefinitions", noxuBetaDefinition.Name)
if _, err := etcdclient.Put(ctx, key, string(betaBytes)); err != nil {
t.Fatalf("unexpected error: %v", err)
}
noxuDefinition, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), noxuBetaDefinition.Name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// wait until the CRD is established
err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
localCrd, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), noxuBetaDefinition.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
condition := findCRDCondition(localCrd, apiextensionsv1.Established)
if condition == nil {
return false, nil
}
if condition.Status == apiextensionsv1.ConditionTrue {
return true, nil
}
return false, nil
})
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
replicas: 1`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
}
func getManagedFields(rawResponse []byte) ([]metav1.ManagedFieldsEntry, error) {
obj := unstructured.Unstructured{}
if err := obj.UnmarshalJSON(rawResponse); err != nil {
return nil, err
}
return obj.GetManagedFields(), nil
}
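// Sketch of intended use (hypothetical call site; the helper just decodes a
// raw API response and returns its managedFields entries):
//
//	entries, err := getManagedFields(result)
//	// entries[i].Manager holds the fieldManager name, e.g. "apply_test".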
func TestDefaultMissingKeyCRD(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewNoxuV1CustomResourceDefinition(apiextensionsv1.ClusterScoped)
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"type": "object",
"properties": {
"spec": {
"type": "object",
"x-kubernetes-preserve-unknown-fields": true,
"properties": {
"cronSpec": {
"type": "string",
"pattern": "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$"
},
"ports": {
"type": "array",
"x-kubernetes-list-map-keys": [
"containerPort",
"protocol"
],
"x-kubernetes-list-type": "map",
"items": {
"properties": {
"containerPort": {
"format": "int32",
"type": "integer"
},
"hostIP": {
"type": "string"
},
"hostPort": {
"format": "int32",
"type": "integer"
},
"name": {
"type": "string"
},
"protocol": {
"default": "TCP",
"type": "string"
}
},
"required": [
"containerPort"
],
"type": "object"
}
}
}<|fim▁hole|> if err != nil {
t.Fatal(err)
}
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
finalizers:
- test-finalizer
spec:
cronSpec: "* * * * */5"
replicas: 1
ports:
- name: x
containerPort: 80`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
// New applier tries to edit an existing list item, we should get conflicts.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 80
protocol: TCP`, apiVersion, kind, name))).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when a different applier updates existing list item, got: %v", status.Status().Details.Causes)
}
}<|fim▁end|> | }
}
}
}`), &noxuDefinition.Spec.Versions[0].Schema) |
<|file_name|>fmt.rs<|end_file_name|><|fim▁begin|>use std::fmt;
#[cfg(all(feature = "color", not(target_os = "windows")))]
use ansi_term::Colour::{Red, Green, Yellow};
#[cfg(all(feature = "color", not(target_os = "windows")))]
use ansi_term::ANSIString;
#[allow(dead_code)]
pub enum Format<T> {
Error(T),
Warning(T),
Good(T),
}
#[cfg(all(feature = "color", not(target_os = "windows")))]
impl<T: AsRef<str>> Format<T> {
fn format(&self) -> ANSIString {
match *self {
Format::Error(ref e) => Red.bold().paint(e.as_ref()),
Format::Warning(ref e) => Yellow.paint(e.as_ref()),
Format::Good(ref e) => Green.paint(e.as_ref()),
}
}
}
#[cfg(all(feature = "color", not(target_os = "windows")))]
impl<T: AsRef<str>> fmt::Display for Format<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", &self.format())
}
}
#[cfg(any(not(feature = "color"), target_os = "windows"))]
impl<T: fmt::Display> Format<T> {
fn format(&self) -> &T {
match *self {
Format::Error(ref e) => e,
Format::Warning(ref e) => e,<|fim▁hole|> Format::Good(ref e) => e,
}
}
}
#[cfg(any(not(feature = "color"), target_os = "windows"))]
impl<T: fmt::Display> fmt::Display for Format<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", &self.format())
}
}
pub fn format_number(n: u64, sep: Option<char>) -> String {
debugln!("executing; format_number; n={}", n);
let s = format!("{}", n);
if let Some(sep) = sep {
debugln!("There was a separator {}", sep);
let mut ins_sep = s.len() % 3;
ins_sep = if ins_sep == 0 { 3 } else {ins_sep};
let mut ret = vec![];
for (i, c) in s.chars().enumerate() {
debugln!("iter; c={}; ins_sep={}; ret={:?}", c, ins_sep, ret);
if ins_sep == 0 && i != 0 {
debugln!("Inserting the separator");
ret.push(sep);
ins_sep = 3;
}
ret.push(c);
ins_sep -= 1;
}
debugln!("returning; ret={}", ret.iter().cloned().collect::<String>());
ret.iter().cloned().collect()
} else {
debugln!("There was not a separator");
s
}
}<|fim▁end|> | |
<|file_name|>DiagnosticGroupWarningsGuard.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
/**
* Sets the level for a particular DiagnosticGroup.
* @author [email protected] (Nick Santos)
*/
public class DiagnosticGroupWarningsGuard extends WarningsGuard {
private final DiagnosticGroup group;
private final CheckLevel level;<|fim▁hole|> DiagnosticGroup group, CheckLevel level) {
this.group = group;
this.level = level;
}
@Override
public CheckLevel level(JSError error) {
return group.matches(error) ? level : null;
}
@Override
public boolean disables(DiagnosticGroup otherGroup) {
return !level.isOn() && group.isSubGroup(otherGroup);
}
@Override
public boolean enables(DiagnosticGroup otherGroup) {
if (level.isOn()) {
for (DiagnosticType type : otherGroup.getTypes()) {
if (group.matches(type)) {
return true;
}
}
}
return false;
}
}<|fim▁end|> |
public DiagnosticGroupWarningsGuard( |
<|file_name|>service.rs<|end_file_name|><|fim▁begin|>use futures::{Future, IntoFuture};
use std::io;
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
/// An asynchronous function from `Request` to a `Response`.
///
/// The `Service` trait is a simplified interface making it easy to write
/// network applications in a modular and reusable way, decoupled from the
/// underlying protocol. It is one of Tokio's fundamental abstractions.
///
/// # Functional
///
/// A `Service` is a function from a `Request`. It immediately returns a
/// `Future` representing the eventual completion of processing the
/// request. The actual request processing may happen at any time in the
/// future, on any thread or executor. The processing may depend on calling
/// other services. At some point in the future, the processing will complete,
/// and the `Future` will resolve to a response or error.
///
/// At a high level, `Service::call` represents an RPC request. The
/// `Service` value can be a server or a client.
///
/// # Server
///
/// An RPC server *implements* the `Service` trait. Requests received by the
/// server over the network are deserialized then passed as an argument to the
/// server value. The returned response is sent back over the network.
///
/// As an example, here is how an HTTP request is processed by a server:
///
/// ```rust,ignore
/// impl Service for HelloWorld {
/// type Req = http::Request;
/// type Resp = http::Response;
/// type Error = http::Error;
/// type Fut = Box<Future<Item = Self::Resp, Error = http::Error>>;
///
/// fn call(&self, req: http::Request) -> Self::Fut {
/// // Create the HTTP response
/// let resp = http::Response::ok()
/// .with_body(b"hello world\n");
///
/// // Return the response as an immediate future
/// futures::finished(resp).boxed()
/// }
/// }
/// ```
///
/// # Client
///
/// A client consumes a service by using a `Service` value. The client may
/// issue requests by invoking `call` and passing the request as an argument.
/// It then receives the response by waiting for the returned future.
///
/// As an example, here is how a Redis request would be issued:
///
/// ```rust,ignore
/// let client = redis::Client::new()
/// .connect("127.0.0.1:6379".parse().unwrap())
/// .unwrap();
///
/// let resp = client.call(Cmd::set("foo", "this is the value of foo"));
///
/// // Wait for the future to resolve
/// println!("Redis response: {:?}", await(resp));
/// ```
///
/// # Middleware
///
/// More often than not, all the pieces needed for writing robust, scalable
/// network applications are the same no matter the underlying protocol. By
/// unifying the API for both clients and servers in a protocol agnostic way,
/// it is possible to write middleware that provides these pieces in a
/// reusable way.
///
/// Take timeouts, for example:
///
/// ```rust,ignore
/// use tokio::Service;
/// use futures::Future;
/// use std::time::Duration;
///
/// // Not yet implemented, but soon :)
/// use tokio::timer::{Timer, Expired};
///
/// pub struct Timeout<T> {
/// upstream: T,
/// delay: Duration,
/// timer: Timer,
/// }
///
/// impl<T> Timeout<T> {
/// pub fn new(upstream: T, delay: Duration) -> Timeout<T> {
/// Timeout {
/// upstream: upstream,
/// delay: delay,
/// timer: Timer::default(),
/// }
/// }
/// }
///
/// impl<T> Service for Timeout<T>
/// where T: Service,
/// T::Error: From<Expired>,
/// {
/// type Req = T::Req;<|fim▁hole|>/// fn call(&self, req: Self::Req) -> Self::Fut {
/// let timeout = self.timer.timeout(self.delay)
/// .and_then(|timeout| Err(Self::Error::from(timeout)));
///
/// self.upstream.call(req)
/// .select(timeout)
/// .map(|(v, _)| v)
/// .map_err(|(e, _)| e)
/// .boxed()
/// }
/// }
///
/// ```
///
/// The above timeout implementation is decoupled from the underlying protocol
/// and is also decoupled from client or server concerns. In other words, the
/// same timeout middleware could be used in either a client or a server.
pub trait Service: Send + 'static {
/// Requests handled by the service.
type Req: Send + 'static;
/// Responses given by the service.
type Resp: Send + 'static;
/// Errors produced by the service.
type Error: Send + 'static;
/// The future response value.
type Fut: Future<Item = Self::Resp, Error = Self::Error>;
/// Process the request and return the response asynchronously.
fn call(&self, req: Self::Req) -> Self::Fut;
}
/// Creates new `Service` values.
pub trait NewService {
/// Requests handled by the service
type Req: Send + 'static;
/// Responses given by the service
type Resp: Send + 'static;
/// Errors produced by the service
type Error: Send + 'static;
/// The `Service` value created by this factory
type Item: Service<Req = Self::Req, Resp = Self::Resp, Error = Self::Error>;
/// Create and return a new service value.
fn new_service(&self) -> io::Result<Self::Item>;
}
/// A service implemented by a closure.
pub struct SimpleService<F, R> {
f: Arc<F>,
_ty: PhantomData<Mutex<R>>, // use Mutex to avoid imposing Sync on Req
}
/// Returns a `Service` backed by the given closure.
pub fn simple_service<F, R>(f: F) -> SimpleService<F, R> {
SimpleService::new(f)
}
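// A minimal usage sketch (hypothetical request/response types; any closure
// returning an `IntoFuture` such as `Result` works):
//
//     let doubler = simple_service(|req: u64| Ok::<u64, io::Error>(req * 2));
//     let resp = doubler.call(21); // a future resolving to 42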
impl<F, R> SimpleService<F, R> {
/// Create and return a new `SimpleService` backed by the given function.
pub fn new(f: F) -> SimpleService<F, R> {
SimpleService {
f: Arc::new(f),
_ty: PhantomData,
}
}
}
impl<F, R, S> Service for SimpleService<F, R>
where F: Fn(R) -> S + Sync + Send + 'static,
R: Send + 'static,
S: IntoFuture,
{
type Req = R;
type Resp = S::Item;
type Error = S::Error;
type Fut = S::Future;
fn call(&self, req: R) -> Self::Fut {
(self.f)(req).into_future()
}
}
impl<F, R> Clone for SimpleService<F, R>
{
fn clone(&self) -> SimpleService<F, R> {
SimpleService {
f: self.f.clone(),
_ty: PhantomData,
}
}
}
impl<T> NewService for T
where T: Service + Clone,
{
type Item = T;
type Req = T::Req;
type Resp = T::Resp;
type Error = T::Error;
fn new_service(&self) -> io::Result<T> {
Ok(self.clone())
}
}<|fim▁end|> | /// type Resp = T::Resp;
/// type Error = T::Error;
/// type Fut = Box<Future<Item = Self::Resp, Error = Self::Error>>;
/// |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
This module contains the :class:`.DataType` class and its subclasses. These
types define how data should be converted during the creation of a
:class:`.Table`.
A :class:`TypeTester` class is also included which be used to infer data
types from column data.
"""
from copy import copy
from agate.data_types.base import DEFAULT_NULL_VALUES, DataType # noqa
from agate.data_types.boolean import Boolean
from agate.data_types.date import Date
from agate.data_types.date_time import DateTime
from agate.data_types.number import Number
from agate.data_types.text import Text
from agate.data_types.time_delta import TimeDelta
from agate.exceptions import CastError # noqa
class TypeTester(object):
"""
Infer data types for the columns in a given set of data.
:param force:
A dictionary where each key is a column name and each value is a
:class:`.DataType` instance that overrides inference.
:param limit:
An optional limit on how many rows to evaluate before selecting the
most likely type. Note that applying a limit may cause errors when the
data is cast, if the guess proves incorrect in later rows of data.
:param types:
A sequence of possible types to test against. This can be used to specify
what data formats you want to test against. For instance, you may want
to exclude :class:`TimeDelta` from testing. It can also be used to pass
options such as ``locale`` to :class:`.Number` or ``cast_nulls`` to
:class:`.Text`. Take care in specifying the order of the list. It is
the order they are tested in. :class:`.Text` should always be last.
"""
def __init__(self, force={}, limit=None, types=None):
self._force = force
self._limit = limit
if types:
self._possible_types = types
else:
# In order of preference
self._possible_types = [
Boolean(),
Number(),
TimeDelta(),
Date(),
DateTime(),
Text()
]
def run(self, rows, column_names):
"""
Apply type inference to the provided data and return an array of
column types.
:param rows:
The data as a sequence of any sequences: tuples, lists, etc.
"""
num_columns = len(column_names)
hypotheses = [set(self._possible_types) for i in range(num_columns)]
force_indices = [column_names.index(name) for name in self._force.keys()]
if self._limit:
sample_rows = rows[:self._limit]
elif self._limit == 0:
text = Text()
return tuple([text] * num_columns)
else:
sample_rows = rows
for row in sample_rows:
for i in range(num_columns):<|fim▁hole|>
h = hypotheses[i]
if len(h) == 1:
continue
for column_type in copy(h):
if len(row) > i and not column_type.test(row[i]):
h.remove(column_type)
column_types = []
for i in range(num_columns):
if i in force_indices:
column_types.append(self._force[column_names[i]])
continue
h = hypotheses[i]
# Select in order of preference
for t in self._possible_types:
if t in h:
column_types.append(t)
break
return tuple(column_types)<|fim▁end|> | if i in force_indices:
continue |
<|file_name|>log_reader.py<|end_file_name|><|fim▁begin|>import time
import asyncio
from aiokafka import AIOKafkaProducer
from settings import KAFKA_SERVERS, SAVEPOINT, LOG_FILE, KAFKA_TOPIC
class LogStreamer:
def __init__(self,
KAFKA_SERVERS,
KAFKA_TOPIC,
loop,
savepoint_file,
log_file):
self.KAFKA_TOPIC = KAFKA_TOPIC
self.loop = loop
self.producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=KAFKA_SERVERS)
self.savepoint_file = savepoint_file
self.log_file = log_file
async def produce(self, finite=False):
last = self.savepoint_file.read()
if last:
self.log_file.seek(int(last))
skip_first_empty = True
while True:
line = self.log_file.readline()
line = line.strip(' \t\n\r')
if not line:
if finite and not skip_first_empty:
return
skip_first_empty = False
await asyncio.sleep(0.1)  # non-blocking sleep so the producer's background tasks keep running
current_position = self.log_file.tell()
if last != str(current_position):<|fim▁hole|>
'''
Here we could convert our data to JSON. But because JSON performance is not
great with the standard library, and because we use an asynchronous
non-blocking model here, I think it's best to just pass the data as-is. I want
to create as little overhead as possible. We want to stream data as fast as
possible.
'''
await self.producer.send_and_wait(self.KAFKA_TOPIC, line.encode())
def start(self):
self.loop.run_until_complete(self.producer.start())
self.loop.run_until_complete(self.produce())
self.loop.run_until_complete(self.producer.stop())
self.loop.close()
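# Usage sketch for one-shot draining (hypothetical; finite=True makes produce()
# return once the file stops yielding lines instead of tailing forever):
#
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(streamer.producer.start())
#   loop.run_until_complete(streamer.produce(finite=True))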
if __name__ == '__main__':
with open(SAVEPOINT, 'r+') as savepoint_file, open(LOG_FILE, 'r') as log_file:
streamer = LogStreamer(KAFKA_SERVERS,
KAFKA_TOPIC,
asyncio.get_event_loop(),
savepoint_file,
log_file)
streamer.start()<|fim▁end|> | self.savepoint_file.seek(0)
self.savepoint_file.write(str(current_position))
continue |
<|file_name|>SlippyMapOriginal.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import sip
sip.setapi('QVariant', 2)
import math
from PyQt4 import QtCore, QtGui, QtNetwork
from lib.Point import Point
from lib.tileOperations import *
TDIM = 256
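# TDIM is the edge length in pixels of one slippy-map tile; 256 px is the
# de-facto standard for OSM-style tile servers.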
class LightMaps(QtGui.QWidget):
def __init__(self, parent = None):
super(LightMaps, self).__init__(parent)
self.pressed = False
self.snapped = False
self._map = SlippyMap(self)
self.pressPos = QtCore.QPoint()
self.dragPos = QtCore.QPoint()
self._map.updated.connect(self.updateMap)
def setCenter(self, lat, lng):
self._map.latitude = lat
self._map.longitude = lng
self._map.invalidate()
def updateMap(self, r):
self.update(r)
def resizeEvent(self, event):
self._map.width = self.width()
self._map.height = self.height()
self._map.invalidate()
def paintEvent(self, event):
p = QtGui.QPainter()
p.begin(self)
self._map.render(p, event.rect())
p.setPen(QtCore.Qt.black)
p.end()
def mousePressEvent(self, event):
if event.buttons() != QtCore.Qt.LeftButton:
return
self.pressed = self.snapped = True
self.pressPos = self.dragPos = event.pos()
def mouseMoveEvent(self, event):
if not event.buttons():
return
if not self.pressed or not self.snapped:
delta = event.pos() - self.pressPos
self.pressPos = event.pos()
self._map.pan(delta)
return
else:
threshold = 10
delta = event.pos() - self.pressPos
if self.snapped:
self.snapped &= delta.x() < threshold
self.snapped &= delta.y() < threshold
self.snapped &= delta.x() > -threshold
self.snapped &= delta.y() > -threshold
self.dragPos = event.pos()
def mouseReleaseEvent(self, event):
self.update()
def wheelEvent(self, event):
delta = event.delta()
delta = abs(delta)/delta
self._map.change_zoom(delta)
self.update()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Left:
self._map.pan(QtCore.QPoint(20, 0))
if event.key() == QtCore.Qt.Key_Right:
self._map.pan(QtCore.QPoint(-20, 0))
if event.key() == QtCore.Qt.Key_Up:
self._map.pan(QtCore.QPoint(0, 20))
if event.key() == QtCore.Qt.Key_Down:
self._map.pan(QtCore.QPoint(0, -20))
if event.key() == QtCore.Qt.Key_Z or event.key() == QtCore.Qt.Key_Select:
self.dragPos = QtCore.QPoint(self.width() / 2, self.height() / 2)
class SlippyMap(QtCore.QObject):
updated = QtCore.pyqtSignal(QtCore.QRect)
def __init__(self, parent=None):
super(SlippyMap, self).__init__(parent)
self._offset = QtCore.QPoint()
self._tilesRect = QtCore.QRect()
self._tilePixmaps = {} # Point(x, y) to QPixmap mapping
self._manager = TileDownloader(self)  # was QtNetwork.QNetworkAccessManager()
#self._manager.finished.connect(self.handleNetworkData)
self._url = QtCore.QUrl()
# public vars
self.width = 400
self.height = 300
self.zoom = 7
self.latitude = -30
self.longitude = -51.2
self._emptyTile = QtGui.QPixmap(TDIM, TDIM)
self._emptyTile.fill(QtCore.Qt.lightGray)
def invalidate(self):
if self.width <= 0 or self.height <= 0:
return
print self.latitude, self.longitude, self.zoom
tx, ty = tileIndexForCoordinate(self.latitude, self.longitude, self.zoom)
# tx = ct.x()
# ty = ct.y()
# top-left corner of the center tile
xp = int(self.width / 2 - (tx - math.floor(tx)) * TDIM)
yp = int(self.height / 2 - (ty - math.floor(ty)) * TDIM)
# first tile vertical and horizontal
xa = (xp + TDIM - 1) / TDIM
ya = (yp + TDIM - 1) / TDIM
xs = int(tx) - xa
ys = int(ty) - ya
# offset for top-left tile
self._offset = QtCore.QPoint(xp - xa * TDIM, yp - ya * TDIM)
# last tile vertical and horizontal
xe = int(tx) + (self.width - xp - 1) / TDIM
ye = int(ty) + (self.height - yp - 1) / TDIM
# build a rect
self._tilesRect = QtCore.QRect(xs, ys, xe - xs + 1, ye - ys + 1)
if self._url.isEmpty():
self._manager.download()
self.updated.emit(QtCore.QRect(0, 0, self.width, self.height))
def render(self, painter, rect):
for x in range(self._tilesRect.width()):
for y in range(self._tilesRect.height()):
print x, y
tp = Point(x + self._tilesRect.left(), y + self._tilesRect.top())
box = QtCore.QRect(self._manager.tileRect(tp))
if rect.intersects(box):
print "Box", box
painter.drawPixmap(box, self._tilePixmaps.get(tp, self._emptyTile))
def pan(self, delta):
dx = QtCore.QPointF(delta) / float(TDIM)
cx, cy = tileIndexForCoordinate(self.latitude, self.longitude, self.zoom)
center = QtCore.QPointF(cx, cy) - dx
self.latitude = latitudeFromTileY(center.y(), self.zoom)
self.longitude = longitudeFromTileX(center.x(), self.zoom)
self.invalidate()
def change_zoom(self, val):
self.zoom = max(1, min(22, self.zoom + val))
print "ZOOM", self.zoom
self.invalidate()
############################
class TileDownloader(QtNetwork.QNetworkAccessManager):
updated = QtCore.pyqtSignal(QtCore.QRect)
def __init__(self, parent=None):
super(TileDownloader, self).__init__()
self.parent = parent
cache = QtNetwork.QNetworkDiskCache()
cache.setCacheDirectory(
QtGui.QDesktopServices.storageLocation
(QtGui.QDesktopServices.CacheLocation))
self.setCache(cache)
self.finished.connect(self.handleNetworkData)
# slots
def handleNetworkData(self, reply):
img = QtGui.QImage()
tp = Point(reply.request().attribute(QtNetwork.QNetworkRequest.User))
url = reply.url()
if not reply.error():
if img.load(reply, None):
self.parent._tilePixmaps[tp] = QtGui.QPixmap.fromImage(img)
reply.deleteLater()
self.parent.updated.emit(self.tileRect(tp))
# purge unused tiles
bound = self.parent._tilesRect.adjusted(-2, -2, 2, 2)
for tp in list(self.parent._tilePixmaps.keys()):
if not bound.contains(tp):
del self.parent._tilePixmaps[tp]
self.download()
def download(self):
grab = None
for x in range(self.parent._tilesRect.width()):
for y in range(self.parent._tilesRect.height()):
tp = Point(self.parent._tilesRect.topLeft() + QtCore.QPoint(x, y))
if tp not in self.parent._tilePixmaps:
grab = QtCore.QPoint(tp)
break
if grab is None:
self._url = QtCore.QUrl()
return<|fim▁hole|> self._url = QtCore.QUrl(path)
request = QtNetwork.QNetworkRequest()
request.setUrl(self._url)
request.setRawHeader('User-Agent', 'Nokia (PyQt) Graphics Dojo 1.0')
request.setAttribute(QtNetwork.QNetworkRequest.User, grab)
self.get(request)
################################
def tileRect(self, tp):
t = tp - self.parent._tilesRect.topLeft()
x = t.x() * TDIM + self.parent._offset.x()
y = t.y() * TDIM + self.parent._offset.y()
return QtCore.QRect(x, y, TDIM, TDIM)
if __name__ == '__main__':
import sys
class MapZoom(QtGui.QMainWindow):
def __init__(self):
super(MapZoom, self).__init__(None)
self.map_ = LightMaps(self)
self.map_.setFocus()
self.setCentralWidget(self.map_)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('LightMaps')
w = MapZoom()
w.setWindowTitle("Slippy Map Demo")
w.resize(600, 450)
w.show()
sys.exit(app.exec_())<|fim▁end|> |
#path = 'http://tile.openstreetmap.org/%d/%d/%d.png' % (self.zoom, grab.x(), grab.y())
path = 'https://mts2.google.com/vt?lyrs=y&x={0}&y={1}&z={2}'.format(grab.x(), grab.y(), self.parent.zoom)
print path |
<|file_name|>IndexerSQLMetadataStorageCoordinatorTest.java<|end_file_name|><|fim▁begin|>/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.metadata;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.druid.indexing.overlord.DataSourceMetadata;
import io.druid.indexing.overlord.ObjectMetadata;
import io.druid.indexing.overlord.SegmentPublishResult;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.java.util.common.StringUtils;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.LinearShardSpec;
import io.druid.timeline.partition.NoneShardSpec;
import io.druid.timeline.partition.NumberedShardSpec;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.util.StringMapper;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
public class IndexerSQLMetadataStorageCoordinatorTest
{
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();
private final ObjectMapper mapper = new DefaultObjectMapper();
private final DataSegment defaultSegment = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"version",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new LinearShardSpec(0),
9,
100
);
private final DataSegment defaultSegment2 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"version",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new LinearShardSpec(1),
9,
100
);
private final DataSegment defaultSegment3 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-03T00Z/2015-01-04T00Z"),
"version",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
NoneShardSpec.instance(),
9,
100
);
// Overshadows defaultSegment, defaultSegment2
private final DataSegment defaultSegment4 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new LinearShardSpec(0),
9,
100
);
private final DataSegment numberedSegment0of0 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new NumberedShardSpec(0, 0),
9,
100
);
private final DataSegment numberedSegment1of0 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new NumberedShardSpec(1, 0),
9,
100
);
private final DataSegment numberedSegment2of0 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new NumberedShardSpec(2, 0),
9,
100
);
private final DataSegment numberedSegment2of1 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new NumberedShardSpec(2, 1),
9,
100
);
private final DataSegment numberedSegment3of1 = new DataSegment(
"fooDataSource",
Interval.parse("2015-01-01T00Z/2015-01-02T00Z"),
"zversion",
ImmutableMap.<String, Object>of(),
ImmutableList.of("dim1"),
ImmutableList.of("m1"),
new NumberedShardSpec(3, 1),
9,
100
);
private final Set<DataSegment> SEGMENTS = ImmutableSet.of(defaultSegment, defaultSegment2);
private final AtomicLong metadataUpdateCounter = new AtomicLong();
private IndexerSQLMetadataStorageCoordinator coordinator;
private TestDerbyConnector derbyConnector;
@Before
public void setUp()
{
derbyConnector = derbyConnectorRule.getConnector();
mapper.registerSubtypes(LinearShardSpec.class);
derbyConnector.createDataSourceTable();
derbyConnector.createTaskTables();
derbyConnector.createSegmentTable();
metadataUpdateCounter.set(0);
coordinator = new IndexerSQLMetadataStorageCoordinator(
mapper,
derbyConnectorRule.metadataTablesConfigSupplier().get(),
derbyConnector
)
{
@Override
protected DataSourceMetadataUpdateResult updateDataSourceMetadataWithHandle(
Handle handle,
String dataSource,
DataSourceMetadata startMetadata,
DataSourceMetadata endMetadata
) throws IOException
{
// Count number of times this method is called.
metadataUpdateCounter.getAndIncrement();
return super.updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
}
};
}
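// The anonymous subclass above counts every metadata-update attempt so tests
// can assert how many transactional tries occurred via metadataUpdateCounter.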
private void unUseSegment()
{
for (final DataSegment segment : SEGMENTS) {
Assert.assertEquals(
1, (int) derbyConnector.getDBI().<Integer>withHandle(
new HandleCallback<Integer>()
{
@Override
public Integer withHandle(Handle handle) throws Exception
{
return handle.createStatement(
StringUtils.format(
"UPDATE %s SET used = false WHERE id = :id",
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable()
)
).bind("id", segment.getIdentifier()).execute();
}
}
)
);
}
}
private List<String> getUsedIdentifiers()
{
final String table = derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
return derbyConnector.retryWithHandle(
new HandleCallback<List<String>>()
{
@Override
public List<String> withHandle(Handle handle) throws Exception
{
return handle.createQuery("SELECT id FROM " + table + " WHERE used = true ORDER BY id")
.map(StringMapper.FIRST)
.list();
}
}
);
}
@Test
public void testSimpleAnnounce() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
for (DataSegment segment : SEGMENTS) {
Assert.assertArrayEquals(
mapper.writeValueAsString(segment).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
segment.getIdentifier()
)
);
}
Assert.assertEquals(
ImmutableList.of(defaultSegment.getIdentifier(), defaultSegment2.getIdentifier()),
getUsedIdentifiers()
);
// Should not update dataSource metadata.
Assert.assertEquals(0, metadataUpdateCounter.get());
}
@Test
public void testOvershadowingAnnounce() throws IOException
{
final ImmutableSet<DataSegment> segments = ImmutableSet.of(defaultSegment, defaultSegment2, defaultSegment4);
coordinator.announceHistoricalSegments(segments);
for (DataSegment segment : segments) {
Assert.assertArrayEquals(
mapper.writeValueAsString(segment).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
segment.getIdentifier()
)
);
}
Assert.assertEquals(ImmutableList.of(defaultSegment4.getIdentifier()), getUsedIdentifiers());
}
@Test
public void testTransactionalAnnounceSuccess() throws IOException
{
// Insert first segment.
final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "bar"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment), true), result1);
Assert.assertArrayEquals(
mapper.writeValueAsString(defaultSegment).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
defaultSegment.getIdentifier()
)
);
// Insert second segment.
final SegmentPublishResult result2 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment2),
new ObjectMetadata(ImmutableMap.of("foo", "bar")),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment2), true), result2);
Assert.assertArrayEquals(
mapper.writeValueAsString(defaultSegment2).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
defaultSegment2.getIdentifier()
)
);
// Examine metadata.
Assert.assertEquals(
new ObjectMetadata(ImmutableMap.of("foo", "baz")),
coordinator.getDataSourceMetadata("fooDataSource")
);
// Should only be tried once per call.
Assert.assertEquals(2, metadataUpdateCounter.get());
}
@Test
public void testTransactionalAnnounceRetryAndSuccess() throws IOException
{
final AtomicLong attemptCounter = new AtomicLong();
final IndexerSQLMetadataStorageCoordinator failOnceCoordinator = new IndexerSQLMetadataStorageCoordinator(
mapper,
derbyConnectorRule.metadataTablesConfigSupplier().get(),
derbyConnector
)
{
@Override
protected DataSourceMetadataUpdateResult updateDataSourceMetadataWithHandle(
Handle handle,
String dataSource,
DataSourceMetadata startMetadata,
DataSourceMetadata endMetadata
) throws IOException
{
metadataUpdateCounter.getAndIncrement();
if (attemptCounter.getAndIncrement() == 0) {
return DataSourceMetadataUpdateResult.TRY_AGAIN;
} else {
return super.updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
}
}
};
// Insert first segment.
final SegmentPublishResult result1 = failOnceCoordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "bar"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment), true), result1);
Assert.assertArrayEquals(
mapper.writeValueAsString(defaultSegment).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
defaultSegment.getIdentifier()
)
);
// Reset attempt counter to induce another failure.
attemptCounter.set(0);
// Insert second segment.
final SegmentPublishResult result2 = failOnceCoordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment2),
new ObjectMetadata(ImmutableMap.of("foo", "bar")),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment2), true), result2);
Assert.assertArrayEquals(
mapper.writeValueAsString(defaultSegment2).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
defaultSegment2.getIdentifier()
)
);
// Examine metadata.
Assert.assertEquals(
new ObjectMetadata(ImmutableMap.of("foo", "baz")),
failOnceCoordinator.getDataSourceMetadata("fooDataSource")
);
// Should be tried twice per call.
Assert.assertEquals(4, metadataUpdateCounter.get());
}
@Test
public void testTransactionalAnnounceFailDbNullWantNotNull() throws IOException
{
final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(ImmutableMap.of("foo", "bar")),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false), result1);
// Should only be tried once.
Assert.assertEquals(1, metadataUpdateCounter.get());
}
@Test
public void testTransactionalAnnounceFailDbNotNullWantNull() throws IOException
{
final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment), true), result1);
final SegmentPublishResult result2 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment2),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false), result2);
// Should only be tried once per call.
Assert.assertEquals(2, metadataUpdateCounter.get());
}
@Test
public void testTransactionalAnnounceFailDbNotNullWantDifferent() throws IOException
{
final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.of(defaultSegment), true), result1);
final SegmentPublishResult result2 = coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment2),
new ObjectMetadata(ImmutableMap.of("foo", "qux")),
new ObjectMetadata(ImmutableMap.of("foo", "baz"))
);
Assert.assertEquals(new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false), result2);
// Should only be tried once per call.
Assert.assertEquals(2, metadataUpdateCounter.get());
}
@Test
public void testSimpleUsedList() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval()
)
)
);
}
@Test
public void testMultiIntervalUsedList() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
coordinator.announceHistoricalSegments(ImmutableSet.of(defaultSegment3));
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForIntervals(
defaultSegment.getDataSource(),
ImmutableList.of(defaultSegment.getInterval())
)
)
);
Assert.assertEquals(
ImmutableSet.of(defaultSegment3),
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForIntervals(
defaultSegment.getDataSource(),
ImmutableList.of(defaultSegment3.getInterval())
)
)
);
Assert.assertEquals(
ImmutableSet.of(defaultSegment, defaultSegment2, defaultSegment3),
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForIntervals(
defaultSegment.getDataSource(),
ImmutableList.of(defaultSegment.getInterval(), defaultSegment3.getInterval())
)
)
);
// Case to check there is no duplication when two intervals overlap the interval of the same segment.
Assert.assertEquals(
ImmutableList.of(defaultSegment3),
coordinator.getUsedSegmentsForIntervals(
defaultSegment.getDataSource(),
ImmutableList.of(
Interval.parse("2015-01-03T00Z/2015-01-03T05Z"),
Interval.parse("2015-01-03T09Z/2015-01-04T00Z")
)
)
);
}
@Test
public void testSimpleUnUsedList() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval()
)
)
);
}
@Test
public void testUsedOverlapLow() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Set<DataSegment> actualSegments = ImmutableSet.copyOf(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
Interval.parse("2014-12-31T23:59:59.999Z/2015-01-01T00:00:00.001Z") // end is exclusive
)
);
Assert.assertEquals(
SEGMENTS,
actualSegments
);
}
@Test
public void testUsedOverlapHigh() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
Interval.parse("2015-1-1T23:59:59.999Z/2015-02-01T00Z")
)
)
);
}
@Test
public void testUsedOutOfBoundsLow() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertTrue(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
new Interval(defaultSegment.getInterval().getStart().minus(1), defaultSegment.getInterval().getStart())
).isEmpty()
);
}
@Test
public void testUsedOutOfBoundsHigh() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertTrue(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
new Interval(defaultSegment.getInterval().getEnd(), defaultSegment.getInterval().getEnd().plusDays(10))
).isEmpty()
);
}
@Test
public void testUsedWithinBoundsEnd() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withEnd(defaultSegment.getInterval().getEnd().minusMillis(1))
)
)
);
}
@Test
public void testUsedOverlapEnd() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUsedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withEnd(defaultSegment.getInterval().getEnd().plusMillis(1))
)
)
);
}
@Test
public void testUnUsedOverlapLow() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertTrue(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
new Interval(
defaultSegment.getInterval().getStart().minus(1),
defaultSegment.getInterval().getStart().plus(1)
)
).isEmpty()
);
}
@Test
public void testUnUsedUnderlapLow() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertTrue(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
new Interval(defaultSegment.getInterval().getStart().plus(1), defaultSegment.getInterval().getEnd())
).isEmpty()
);
}
@Test
public void testUnUsedUnderlapHigh() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertTrue(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
new Interval(defaultSegment.getInterval().getStart(), defaultSegment.getInterval().getEnd().minus(1))
).isEmpty()
);
}
@Test
public void testUnUsedOverlapHigh() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertTrue(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withStart(defaultSegment.getInterval().getEnd().minus(1))
).isEmpty()
);
}
@Test
public void testUnUsedBigOverlap() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
Interval.parse("2000/2999")
)
)
);
}
@Test
public void testUnUsedLowRange() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withStart(defaultSegment.getInterval().getStart().minus(1))
)
)
);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withStart(defaultSegment.getInterval().getStart().minusYears(1))
)
)
);
}
@Test
public void testUnUsedHighRange() throws IOException
{
coordinator.announceHistoricalSegments(SEGMENTS);
unUseSegment();
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withEnd(defaultSegment.getInterval().getEnd().plus(1))
)
)
);
Assert.assertEquals(
SEGMENTS,
ImmutableSet.copyOf(
coordinator.getUnusedSegmentsForInterval(
defaultSegment.getDataSource(),
defaultSegment.getInterval().withEnd(defaultSegment.getInterval().getEnd().plusYears(1))
)
)
);
}
@Test
public void testDeleteDataSourceMetadata() throws IOException
{
coordinator.announceHistoricalSegments(
ImmutableSet.of(defaultSegment),
new ObjectMetadata(null),
new ObjectMetadata(ImmutableMap.of("foo", "bar"))
);
Assert.assertEquals(
new ObjectMetadata(ImmutableMap.of("foo", "bar")),
coordinator.getDataSourceMetadata("fooDataSource")
);
Assert.assertFalse("deleteInvalidDataSourceMetadata", coordinator.deleteDataSourceMetadata("nonExistentDS"));
Assert.assertTrue("deleteValidDataSourceMetadata", coordinator.deleteDataSourceMetadata("fooDataSource"));
Assert.assertNull("getDataSourceMetadataNullAfterDelete", coordinator.getDataSourceMetadata("fooDataSource"));
}
@Test
public void testSingleAdditionalNumberedShardWithNoCorePartitions() throws IOException
{
additionalNumberedShardTest(ImmutableSet.of(numberedSegment0of0));
}
@Test
public void testMultipleAdditionalNumberedShardsWithNoCorePartitions() throws IOException<|fim▁hole|> @Test
public void testSingleAdditionalNumberedShardWithOneCorePartition() throws IOException
{
additionalNumberedShardTest(ImmutableSet.of(numberedSegment2of1));
}
@Test
public void testMultipleAdditionalNumberedShardsWithOneCorePartition() throws IOException
{
additionalNumberedShardTest(ImmutableSet.of(numberedSegment2of1, numberedSegment3of1));
}
private void additionalNumberedShardTest(Set<DataSegment> segments) throws IOException
{
coordinator.announceHistoricalSegments(segments);
for (DataSegment segment : segments) {
Assert.assertArrayEquals(
mapper.writeValueAsString(segment).getBytes("UTF-8"),
derbyConnector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
segment.getIdentifier()
)
);
}
Assert.assertEquals(
segments.stream().map(DataSegment::getIdentifier).collect(Collectors.toList()),
getUsedIdentifiers()
);
// Should not update dataSource metadata.
Assert.assertEquals(0, metadataUpdateCounter.get());
}
}<|fim▁end|> | {
additionalNumberedShardTest(ImmutableSet.of(numberedSegment0of0, numberedSegment1of0, numberedSegment2of0));
}
|
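// Usage sketch (illustrative, not part of the test class above): the
// three-argument announceHistoricalSegments(...) exercised by these tests is a
// compare-and-swap publish. It commits only when the metadata currently stored
// for the datasource equals the "start" argument; otherwise it returns an
// unsuccessful SegmentPublishResult and the caller re-reads and retries. The
// isSuccess() accessor is assumed from the (segments, success) constructor
// used in the assertions above.
final SegmentPublishResult publishResult = coordinator.announceHistoricalSegments(
    ImmutableSet.of(defaultSegment),
    new ObjectMetadata(ImmutableMap.of("offset", "10")), // expected current metadata
    new ObjectMetadata(ImmutableMap.of("offset", "20"))  // metadata committed with the segments
);
if (!publishResult.isSuccess()) {
  // Another writer changed the metadata first; re-read it and retry the publish.
}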
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import importlib
def import_string(import_name: str):
"""
Import an object based on the import string.
<|fim▁hole|> raise RuntimeError(
f'{import_name} must separate module from object with ":". '
f'For example, "linguee_api.downloaders:HTTPXDownloader"'
)
module_name, object_name = import_name.rsplit(":", 1)
mod = importlib.import_module(module_name)
return getattr(mod, object_name)<|fim▁end|> | Separate module name from the object name with ":". For example,
"linuguee_api.downloaders:HTTPXDownloader"
"""
if ":" not in import_name: |
<|file_name|>fields.py<|end_file_name|><|fim▁begin|>"""Contain common module fields."""
# pylint: disable=too-many-public-methods
from __future__ import absolute_import
import re
from django.db import models
from django.core.exceptions import ValidationError
class NameField(models.CharField):
"""Item name string field.
This field is limited to 150 characters and contains the name (string).
Good examples:
* "name_lastname"
* "name@lastname"
* a 150-character name
Bad examples:
* a 151-character name
"""
MAX_LEN = 150
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(NameField, self).__init__(*args, max_length=max_length,
**kwargs)
class DynamicIPAddressField(models.CharField):
"""DNS name or IP address."""
MAX_LEN = 64
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(DynamicIPAddressField, self).__init__(*args,
max_length=max_length,
**kwargs)
class MACAddressField(models.CharField):
"""MAC address field."""
MAX_LEN = 17 # a MAC address is exactly 17 characters: six hex pairs plus five colons
MAC_ADDRESS_REGEX = '^(([0-9a-fA-F]{2}):){5}[0-9a-fA-F]{2}$' # anchored so trailing characters fail validation
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(MACAddressField, self).__init__(*args,
max_length=max_length,
**kwargs)
def validate(self, value, model_instance):
"""Validate that the input value is a MAC address."""
super(MACAddressField, self).validate(value, model_instance)
if re.match(self.MAC_ADDRESS_REGEX, value) is None:
raise ValidationError('The input MAC address does not match the '
'pattern of a MAC address')
class PathField(models.CharField):
r"""File-system path string field.
This field is limited to 200 characters and contains string path split by
slashes or backslashes.
Good examples:
* "/mnt/home/code/a.txt"
* "/./a"
* "c:\\windows\\temp"
Bad examples:
* "//mnt//@%$2"
* "c:\;"
"""
MAX_LEN = 200
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(PathField, self).__init__(*args, max_length=max_length,
**kwargs)
class VersionField(models.CharField):
"""Item version string field.
This field is limited to 10 characters and contains numbers and characters
separated by dots.
Good examples:
* "4.12F"
* "1.1423"
<|fim▁hole|>
* "4,12F"
* "1/1423"
"""
MAX_LEN = 10
def __init__(self, max_length=MAX_LEN, *args, **kwargs):
super(VersionField, self).__init__(*args, max_length=max_length,
**kwargs)
class PortField(models.PositiveSmallIntegerField):
"""Port number field (for IP connections)."""
pass<|fim▁end|> | Bad examples: |
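# Usage sketch (illustrative): the fields above drop into an ordinary Django
# model. The model below is hypothetical and not part of this module.
class NetworkDevice(models.Model):
    name = NameField()                 # free-form name, up to 150 characters
    address = DynamicIPAddressField()  # DNS name or IP address
    mac = MACAddressField()            # validated against MAC_ADDRESS_REGEX
    port = PortField()                 # positive small integer port number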
<|file_name|>Sizzle.js<|end_file_name|><|fim▁begin|>// #ifndef jquery
/*
* Sizzle CSS Selector Engine
* Copyright, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
* More information: http://sizzlejs.com/
*/
(function(){
var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
expando = "sizcache",
done = 0,
toString = Object.prototype.toString,
hasDuplicate = false,
baseHasDuplicate = true,
rBackslash = /\\/g,
rReturn = /\r\n/g,
rNonWord = /\W/;
// Here we check if the JavaScript engine is using some sort of
// optimization where it does not always call our comparison
// function. If that is the case, discard the hasDuplicate value.
// Thus far that includes Google Chrome.
[0, 0].sort(function() {
baseHasDuplicate = false;
return 0;
});
var Sizzle = function( selector, context, results, seed ) {
results = results || [];
context = context || document;
var origContext = context;
if ( context.nodeType !== 1 && context.nodeType !== 9 ) {
return [];
}
if ( !selector || typeof selector !== "string" ) {
return results;
}
var m, set, checkSet, extra, ret, cur, pop, i,
prune = true,
contextXML = Sizzle.isXML( context ),
parts = [],
soFar = selector;
// Reset the position of the chunker regexp (start from head)
do {
chunker.exec( "" );
m = chunker.exec( soFar );
if ( m ) {
soFar = m[3];
parts.push( m[1] );
if ( m[2] ) {
extra = m[3];
break;
}
}
} while ( m );
if ( parts.length > 1 && origPOS.exec( selector ) ) {
if ( parts.length === 2 && Expr.relative[ parts[0] ] ) {
set = posProcess( parts[0] + parts[1], context, seed );
} else {
set = Expr.relative[ parts[0] ] ?
[ context ] :
Sizzle( parts.shift(), context );
while ( parts.length ) {
selector = parts.shift();
if ( Expr.relative[ selector ] ) {
selector += parts.shift();
}
set = posProcess( selector, set, seed );
}
}
} else {
// Take a shortcut and set the context if the root selector is an ID
// (but not if it'll be faster if the inner selector is an ID)
if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML &&
Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) {
ret = Sizzle.find( parts.shift(), context, contextXML );
context = ret.expr ?
Sizzle.filter( ret.expr, ret.set )[0] :
ret.set[0];
}
if ( context ) {
ret = seed ?
{ expr: parts.pop(), set: makeArray(seed) } :
Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML );
set = ret.expr ?
Sizzle.filter( ret.expr, ret.set ) :
ret.set;
if ( parts.length > 0 ) {
checkSet = makeArray( set );
} else {
prune = false;
}
while ( parts.length ) {
cur = parts.pop();
pop = cur;
if ( !Expr.relative[ cur ] ) {
cur = "";
} else {
pop = parts.pop();
}
if ( pop == null ) {
pop = context;
}
Expr.relative[ cur ]( checkSet, pop, contextXML );
}
} else {
checkSet = parts = [];
}
}
if ( !checkSet ) {
checkSet = set;
}
if ( !checkSet ) {
Sizzle.error( cur || selector );
}
if ( toString.call(checkSet) === "[object Array]" ) {
if ( !prune ) {
results.push.apply( results, checkSet );
} else if ( context && context.nodeType === 1 ) {
for ( i = 0; checkSet[i] != null; i++ ) {
if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) {
results.push( set[i] );
}
}
} else {
for ( i = 0; checkSet[i] != null; i++ ) {
if ( checkSet[i] && checkSet[i].nodeType === 1 ) {
results.push( set[i] );
}
}
}
} else {
makeArray( checkSet, results );
}
if ( extra ) {
Sizzle( extra, origContext, results, seed );
Sizzle.uniqueSort( results );
}
return results;
};
Sizzle.uniqueSort = function( results ) {
if ( sortOrder ) {
hasDuplicate = baseHasDuplicate;
results.sort( sortOrder );
if ( hasDuplicate ) {
for ( var i = 1; i < results.length; i++ ) {
if ( results[i] === results[ i - 1 ] ) {
results.splice( i--, 1 );
}
}
}
}
return results;
};
Sizzle.matches = function( expr, set ) {
return Sizzle( expr, null, null, set );
};
Sizzle.matchesSelector = function( node, expr ) {
return Sizzle( expr, null, null, [node] ).length > 0;
};
Sizzle.find = function( expr, context, isXML ) {
var set, i, len, match, type, left;
if ( !expr ) {
return [];
}
for ( i = 0, len = Expr.order.length; i < len; i++ ) {
type = Expr.order[i];
if ( (match = Expr.leftMatch[ type ].exec( expr )) ) {
left = match[1];
match.splice( 1, 1 );
if ( left.substr( left.length - 1 ) !== "\\" ) {
match[1] = (match[1] || "").replace( rBackslash, "" );
set = Expr.find[ type ]( match, context, isXML );
if ( set != null ) {
expr = expr.replace( Expr.match[ type ], "" );
break;
}
}
}
}
if ( !set ) {
set = typeof context.getElementsByTagName !== "undefined" ?
context.getElementsByTagName( "*" ) :
[];
}
return { set: set, expr: expr };
};
Sizzle.filter = function( expr, set, inplace, not ) {
var match, anyFound,
type, found, item, filter, left,
i, pass,
old = expr,
result = [],
curLoop = set,
isXMLFilter = set && set[0] && Sizzle.isXML( set[0] );
while ( expr && set.length ) {
for ( type in Expr.filter ) {
if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) {
filter = Expr.filter[ type ];
left = match[1];
anyFound = false;
match.splice(1,1);
if ( left.substr( left.length - 1 ) === "\\" ) {
continue;
}
if ( curLoop === result ) {
result = [];
}
if ( Expr.preFilter[ type ] ) {
match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter );
if ( !match ) {
anyFound = found = true;
} else if ( match === true ) {
continue;
}
}
if ( match ) {
for ( i = 0; (item = curLoop[i]) != null; i++ ) {
if ( item ) {
found = filter( item, match, i, curLoop );
pass = not ^ found;
if ( inplace && found != null ) {
if ( pass ) {
anyFound = true;
} else {
curLoop[i] = false;
}
} else if ( pass ) {
result.push( item );
anyFound = true;
}
}
}
}
if ( found !== undefined ) {
if ( !inplace ) {
curLoop = result;
}
expr = expr.replace( Expr.match[ type ], "" );
if ( !anyFound ) {
return [];
}
break;
}
}
}
// Improper expression
if ( expr === old ) {
if ( anyFound == null ) {
Sizzle.error( expr );
} else {
break;
}
}
old = expr;
}
return curLoop;
};
Sizzle.error = function( msg ) {
throw new Error( "Syntax error, unrecognized expression: " + msg );
};
/**
 * Utility function for retrieving the text value of an array of DOM nodes
* @param {Array|Element} elem
*/
var getText = Sizzle.getText = function( elem ) {
var i, node,
nodeType = elem.nodeType,
ret = "";
if ( nodeType ) {
if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
// Use textContent || innerText for elements
if ( typeof elem.textContent === 'string' ) {
return elem.textContent;
} else if ( typeof elem.innerText === 'string' ) {
// Replace IE's carriage returns
return elem.innerText.replace( rReturn, '' );
} else {
// Traverse its children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling) {
ret += getText( elem );
}
}
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
} else {
// If no nodeType, this is expected to be an array
for ( i = 0; (node = elem[i]); i++ ) {
// Do not traverse comment nodes
if ( node.nodeType !== 8 ) {
ret += getText( node );
}
}
}
return ret;
};
var Expr = Sizzle.selectors = {
order: [ "ID", "NAME", "TAG" ],
match: {
ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,
ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,
TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,
CHILD: /:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,
POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,
PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/
},
leftMatch: {},
attrMap: {
"class": "className",
"for": "htmlFor"
},
attrHandle: {
href: function( elem ) {
return elem.getAttribute( "href" );
},
type: function( elem ) {
return elem.getAttribute( "type" );
}
},
relative: {
"+": function(checkSet, part){
var isPartStr = typeof part === "string",
isTag = isPartStr && !rNonWord.test( part ),
isPartStrNotTag = isPartStr && !isTag;
if ( isTag ) {
part = part.toLowerCase();
}
for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) {
if ( (elem = checkSet[i]) ) {
while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {}
checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ?
elem || false :
elem === part;
}
}
if ( isPartStrNotTag ) {
Sizzle.filter( part, checkSet, true );
}
},
">": function( checkSet, part ) {
var elem,
isPartStr = typeof part === "string",
i = 0,
l = checkSet.length;
if ( isPartStr && !rNonWord.test( part ) ) {
part = part.toLowerCase();
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
var parent = elem.parentNode;
checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false;
}
}
} else {
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
checkSet[i] = isPartStr ?
elem.parentNode :
elem.parentNode === part;
}
}
if ( isPartStr ) {
Sizzle.filter( part, checkSet, true );
}
}
},
"": function(checkSet, part, isXML){
var nodeCheck,
doneName = done++,
checkFn = dirCheck;
if ( typeof part === "string" && !rNonWord.test( part ) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn( "parentNode", part, doneName, checkSet, nodeCheck, isXML );
},
"~": function( checkSet, part, isXML ) {
var nodeCheck,
doneName = done++,
checkFn = dirCheck;
if ( typeof part === "string" && !rNonWord.test( part ) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn( "previousSibling", part, doneName, checkSet, nodeCheck, isXML );
}
},
find: {
ID: function( match, context, isXML ) {
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
return m && m.parentNode ? [m] : [];
}
},
NAME: function( match, context ) {
if ( typeof context.getElementsByName !== "undefined" ) {
var ret = [],
results = context.getElementsByName( match[1] );
for ( var i = 0, l = results.length; i < l; i++ ) {
if ( results[i].getAttribute("name") === match[1] ) {
ret.push( results[i] );
}
}
return ret.length === 0 ? null : ret;
}
},
TAG: function( match, context ) {
if ( typeof context.getElementsByTagName !== "undefined" ) {
return context.getElementsByTagName( match[1] );
}
}
},
preFilter: {
CLASS: function( match, curLoop, inplace, result, not, isXML ) {
match = " " + match[1].replace( rBackslash, "" ) + " ";
if ( isXML ) {
return match;
}
for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) {
if ( elem ) {
if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n\r]/g, " ").indexOf(match) >= 0) ) {
if ( !inplace ) {
result.push( elem );
}
} else if ( inplace ) {
curLoop[i] = false;
}
}
}
return false;
},
ID: function( match ) {
return match[1].replace( rBackslash, "" );
},
TAG: function( match, curLoop ) {
return match[1].replace( rBackslash, "" ).toLowerCase();
},
CHILD: function( match ) {
if ( match[1] === "nth" ) {
if ( !match[2] ) {
Sizzle.error( match[0] );
}
match[2] = match[2].replace(/^\+|\s*/g, '');
// parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
var test = /(-?)(\d*)(?:n([+\-]?\d*))?/.exec(
match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" ||
!/\D/.test( match[2] ) && "0n+" + match[2] || match[2]);
// calculate the numbers (first)n+(last) including if they are negative
match[2] = (test[1] + (test[2] || 1)) - 0;
match[3] = test[3] - 0;
}
else if ( match[2] ) {
Sizzle.error( match[0] );
}
// TODO: Move to normal caching system
match[0] = done++;
return match;
},
ATTR: function( match, curLoop, inplace, result, not, isXML ) {
var name = match[1] = match[1].replace( rBackslash, "" );
if ( !isXML && Expr.attrMap[name] ) {
match[1] = Expr.attrMap[name];
}
// Handle if an un-quoted value was used
match[4] = ( match[4] || match[5] || "" ).replace( rBackslash, "" );
if ( match[2] === "~=" ) {
match[4] = " " + match[4] + " ";
}
return match;
},
PSEUDO: function( match, curLoop, inplace, result, not ) {
if ( match[1] === "not" ) {
// If we're dealing with a complex expression, or a simple one
if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) {
match[3] = Sizzle(match[3], null, null, curLoop);
} else {
var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not);
if ( !inplace ) {
result.push.apply( result, ret );
}
return false;
}
} else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) {
return true;
}
return match;
},
POS: function( match ) {
match.unshift( true );
return match;
}
},
filters: {
enabled: function( elem ) {
return elem.disabled === false && elem.type !== "hidden";
},
disabled: function( elem ) {
return elem.disabled === true;
},
checked: function( elem ) {
return elem.checked === true;
},
selected: function( elem ) {
// Accessing this property makes selected-by-default
// options in Safari work properly
if ( elem.parentNode ) {
elem.parentNode.selectedIndex;
}
return elem.selected === true;
},
parent: function( elem ) {
return !!elem.firstChild;
},
empty: function( elem ) {
return !elem.firstChild;
},
has: function( elem, i, match ) {
return !!Sizzle( match[3], elem ).length;
},
header: function( elem ) {
return (/h\d/i).test( elem.nodeName );
},
text: function( elem ) {
var attr = elem.getAttribute( "type" ), type = elem.type;
// IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc)
// use getAttribute instead to test this case
return elem.nodeName.toLowerCase() === "input" && "text" === type && ( attr === type || attr === null );
},
radio: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "radio" === elem.type;
},
checkbox: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "checkbox" === elem.type;
},
file: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "file" === elem.type;
},
password: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "password" === elem.type;
},
submit: function( elem ) {
var name = elem.nodeName.toLowerCase();
return (name === "input" || name === "button") && "submit" === elem.type;
},
image: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "image" === elem.type;
},
reset: function( elem ) {
var name = elem.nodeName.toLowerCase();
return (name === "input" || name === "button") && "reset" === elem.type;
},
button: function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && "button" === elem.type || name === "button";
},
input: function( elem ) {
return (/input|select|textarea|button/i).test( elem.nodeName );
},
focus: function( elem ) {
return elem === elem.ownerDocument.activeElement;
}
},
setFilters: {
first: function( elem, i ) {
return i === 0;
},
last: function( elem, i, match, array ) {
return i === array.length - 1;
},
even: function( elem, i ) {
return i % 2 === 0;
},
odd: function( elem, i ) {
return i % 2 === 1;
},
lt: function( elem, i, match ) {
return i < match[3] - 0;
},
gt: function( elem, i, match ) {
return i > match[3] - 0;
},
nth: function( elem, i, match ) {
return match[3] - 0 === i;
},
eq: function( elem, i, match ) {
return match[3] - 0 === i;
}
},
filter: {
PSEUDO: function( elem, match, i, array ) {
var name = match[1],
filter = Expr.filters[ name ];
if ( filter ) {
return filter( elem, i, match, array );
} else if ( name === "contains" ) {
return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0;
} else if ( name === "not" ) {
var not = match[3];
for ( var j = 0, l = not.length; j < l; j++ ) {
if ( not[j] === elem ) {
return false;
}
}
return true;
} else {
Sizzle.error( name );
}
},
CHILD: function( elem, match ) {
var first, last,
doneName, parent, cache,
count, diff,
type = match[1],
node = elem;
switch ( type ) {
case "only":
case "first":
while ( (node = node.previousSibling) ) {
if ( node.nodeType === 1 ) {
return false;
}
}
if ( type === "first" ) {
return true;
}
node = elem;
/* falls through */
case "last":
while ( (node = node.nextSibling) ) {
if ( node.nodeType === 1 ) {
return false;
}
}
return true;
case "nth":
first = match[2];
last = match[3];
if ( first === 1 && last === 0 ) {
return true;
}
doneName = match[0];
parent = elem.parentNode;
if ( parent && (parent[ expando ] !== doneName || !elem.nodeIndex) ) {
count = 0;
for ( node = parent.firstChild; node; node = node.nextSibling ) {
if ( node.nodeType === 1 ) {
node.nodeIndex = ++count;
}
}
parent[ expando ] = doneName;
}
diff = elem.nodeIndex - last;
if ( first === 0 ) {
return diff === 0;
} else {
return ( diff % first === 0 && diff / first >= 0 );
}
}
},
ID: function( elem, match ) {
return elem.nodeType === 1 && elem.getAttribute("id") === match;
},
TAG: function( elem, match ) {
return (match === "*" && elem.nodeType === 1) || !!elem.nodeName && elem.nodeName.toLowerCase() === match;
},
CLASS: function( elem, match ) {
return (" " + (elem.className || elem.getAttribute("class")) + " ")
.indexOf( match ) > -1;
},
ATTR: function( elem, match ) {
var name = match[1],
result = Sizzle.attr ?
Sizzle.attr( elem, name ) :
Expr.attrHandle[ name ] ?
Expr.attrHandle[ name ]( elem ) :
elem[ name ] != null ?
elem[ name ] :
elem.getAttribute( name ),
value = result + "",
type = match[2],
check = match[4];
return result == null ?
type === "!=" :
!type && Sizzle.attr ?
result != null :
type === "=" ?
value === check :
type === "*=" ?
value.indexOf(check) >= 0 :
type === "~=" ?
(" " + value + " ").indexOf(check) >= 0 :
!check ?
value && result !== false :
type === "!=" ?
value !== check :
type === "^=" ?
value.indexOf(check) === 0 :
type === "$=" ?
value.substr(value.length - check.length) === check :
type === "|=" ?
value === check || value.substr(0, check.length + 1) === check + "-" :
false;
},
POS: function( elem, match, i, array ) {
var name = match[2],
filter = Expr.setFilters[ name ];
if ( filter ) {
return filter( elem, i, match, array );
}
}
}
};
var origPOS = Expr.match.POS,
fescape = function(all, num){
return "\\" + (num - 0 + 1);
};
for ( var type in Expr.match ) {
Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) );
Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) );
}
// Expose origPOS
// "global" as in regardless of relation to brackets/parens
Expr.match.globalPOS = origPOS;
var makeArray = function( array, results ) {
array = Array.prototype.slice.call( array, 0 );
if ( results ) {
results.push.apply( results, array );
return results;
}
return array;
};
// Perform a simple check to determine if the browser is capable of
// converting a NodeList to an array using builtin methods.
// Also verifies that the returned array holds DOM nodes
// (which is not the case in the Blackberry browser)
try {
Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType;
// Provide a fallback method if it does not work
} catch( e ) {
makeArray = function( array, results ) {
var i = 0,
ret = results || [];
if ( toString.call(array) === "[object Array]" ) {
Array.prototype.push.apply( ret, array );
} else {
if ( typeof array.length === "number" ) {
for ( var l = array.length; i < l; i++ ) {
ret.push( array[i] );
}
} else {
for ( ; array[i]; i++ ) {
ret.push( array[i] );
}
}
}
return ret;
};
}
var sortOrder, siblingCheck;
if ( document.documentElement.compareDocumentPosition ) {
sortOrder = function( a, b ) {
if ( a === b ) {
hasDuplicate = true;
return 0;
}
if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) {
return a.compareDocumentPosition ? -1 : 1;
}
return a.compareDocumentPosition(b) & 4 ? -1 : 1;
};
} else {
sortOrder = function( a, b ) {
// The nodes are identical, we can exit early
if ( a === b ) {
hasDuplicate = true;
return 0;
// Fallback to using sourceIndex (in IE) if it's available on both nodes
} else if ( a.sourceIndex && b.sourceIndex ) {
return a.sourceIndex - b.sourceIndex;
}
var al, bl,
ap = [],
bp = [],
aup = a.parentNode,
bup = b.parentNode,
cur = aup;
// If the nodes are siblings (or identical) we can do a quick check
if ( aup === bup ) {
return siblingCheck( a, b );
// If no parents were found then the nodes are disconnected
} else if ( !aup ) {
return -1;
} else if ( !bup ) {
return 1;
}
// Otherwise they're somewhere else in the tree so we need
// to build up a full list of the parentNodes for comparison
while ( cur ) {
ap.unshift( cur );
cur = cur.parentNode;
}
cur = bup;
while ( cur ) {
bp.unshift( cur );
cur = cur.parentNode;
}
al = ap.length;
bl = bp.length;
// Start walking down the tree looking for a discrepancy
for ( var i = 0; i < al && i < bl; i++ ) {
if ( ap[i] !== bp[i] ) {
return siblingCheck( ap[i], bp[i] );
}
}<|fim▁hole|> siblingCheck( ap[i], b, 1 );
};
siblingCheck = function( a, b, ret ) {
if ( a === b ) {
return ret;
}
var cur = a.nextSibling;
while ( cur ) {
if ( cur === b ) {
return -1;
}
cur = cur.nextSibling;
}
return 1;
};
}
// Check to see if the browser returns elements by name when
// querying by getElementById (and provide a workaround)
(function(){
// We're going to inject a fake anchor element with a specified name
var form = document.createElement("div"),
id = "script" + (new Date()).getTime(),
root = document.documentElement;
form.innerHTML = "<a name='" + id + "'/>";
// Inject it into the root element, check its status, and remove it quickly
root.insertBefore( form, root.firstChild );
// The workaround has to do additional checks after a getElementById
// Which slows things down for other browsers (hence the branching)
if ( document.getElementById( id ) ) {
Expr.find.ID = function( match, context, isXML ) {
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
return m ?
m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ?
[m] :
undefined :
[];
}
};
Expr.filter.ID = function( elem, match ) {
var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id");
return elem.nodeType === 1 && node && node.nodeValue === match;
};
}
root.removeChild( form );
// release memory in IE
root = form = null;
})();
(function(){
// Check to see if the browser returns only elements
// when doing getElementsByTagName("*")
// Create a fake element
var div = document.createElement("div");
div.appendChild( document.createComment("") );
// Make sure no comments are found
if ( div.getElementsByTagName("*").length > 0 ) {
Expr.find.TAG = function( match, context ) {
var results = context.getElementsByTagName( match[1] );
// Filter out possible comments
if ( match[1] === "*" ) {
var tmp = [];
for ( var i = 0; results[i]; i++ ) {
if ( results[i].nodeType === 1 ) {
tmp.push( results[i] );
}
}
results = tmp;
}
return results;
};
}
// Check to see if an attribute returns normalized href attributes
div.innerHTML = "<a href='#'></a>";
if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" &&
div.firstChild.getAttribute("href") !== "#" ) {
Expr.attrHandle.href = function( elem ) {
return elem.getAttribute( "href", 2 );
};
}
// release memory in IE
div = null;
})();
if ( document.querySelectorAll ) {
(function(){
var oldSizzle = Sizzle,
div = document.createElement("div"),
id = "__sizzle__";
div.innerHTML = "<p class='TEST'></p>";
// Safari can't handle uppercase or unicode characters when
// in quirks mode.
if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
return;
}
Sizzle = function( query, context, extra, seed ) {
context = context || document;
// Only use querySelectorAll on non-XML documents
// (ID selectors don't work in non-HTML documents)
if ( !seed && !Sizzle.isXML(context) ) {
// See if we find a selector to speed up
var match = /^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec( query );
if ( match && (context.nodeType === 1 || context.nodeType === 9) ) {
// Speed-up: Sizzle("TAG")
if ( match[1] ) {
return makeArray( context.getElementsByTagName( query ), extra );
// Speed-up: Sizzle(".CLASS")
} else if ( match[2] && Expr.find.CLASS && context.getElementsByClassName ) {
return makeArray( context.getElementsByClassName( match[2] ), extra );
}
}
if ( context.nodeType === 9 ) {
// Speed-up: Sizzle("body")
// The body element only exists once, optimize finding it
if ( query === "body" && context.body ) {
return makeArray( [ context.body ], extra );
// Speed-up: Sizzle("#ID")
} else if ( match && match[3] ) {
var elem = context.getElementById( match[3] );
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
if ( elem && elem.parentNode ) {
// Handle the case where IE and Opera return items
// by name instead of ID
if ( elem.id === match[3] ) {
return makeArray( [ elem ], extra );
}
} else {
return makeArray( [], extra );
}
}
try {
return makeArray( context.querySelectorAll(query), extra );
} catch(qsaError) {}
// qSA works strangely on Element-rooted queries
// We can work around this by specifying an extra ID on the root
// and working up from there (Thanks to Andrew Dupont for the technique)
// IE 8 doesn't work on object elements
} else if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) {
var oldContext = context,
old = context.getAttribute( "id" ),
nid = old || id,
hasParent = context.parentNode,
relativeHierarchySelector = /^\s*[+~]/.test( query );
if ( !old ) {
context.setAttribute( "id", nid );
} else {
nid = nid.replace( /'/g, "\\$&" );
}
if ( relativeHierarchySelector && hasParent ) {
context = context.parentNode;
}
try {
if ( !relativeHierarchySelector || hasParent ) {
return makeArray( context.querySelectorAll( "[id='" + nid + "'] " + query ), extra );
}
} catch(pseudoError) {
} finally {
if ( !old ) {
oldContext.removeAttribute( "id" );
}
}
}
}
return oldSizzle(query, context, extra, seed);
};
for ( var prop in oldSizzle ) {
Sizzle[ prop ] = oldSizzle[ prop ];
}
// release memory in IE
div = null;
})();
}
(function(){
var html = document.documentElement,
matches = html.matchesSelector || html.mozMatchesSelector || html.webkitMatchesSelector || html.msMatchesSelector;
if ( matches ) {
// Check to see if it's possible to do matchesSelector
// on a disconnected node (IE 9 fails this)
var disconnectedMatch = !matches.call( document.createElement( "div" ), "div" ),
pseudoWorks = false;
try {
// This should fail with an exception
// Gecko does not error, returns false instead
matches.call( document.documentElement, "[test!='']:sizzle" );
} catch( pseudoError ) {
pseudoWorks = true;
}
Sizzle.matchesSelector = function( node, expr ) {
// Make sure that attribute selectors are quoted
expr = expr.replace(/\=\s*([^'"\]]*)\s*\]/g, "='$1']");
if ( !Sizzle.isXML( node ) ) {
try {
if ( pseudoWorks || !Expr.match.PSEUDO.test( expr ) && !/!=/.test( expr ) ) {
var ret = matches.call( node, expr );
// IE 9's matchesSelector returns false on disconnected nodes
if ( ret || !disconnectedMatch ||
// As well, disconnected nodes are said to be in a document
// fragment in IE 9, so check for that
node.document && node.document.nodeType !== 11 ) {
return ret;
}
}
} catch(e) {}
}
return Sizzle(expr, null, null, [node]).length > 0;
};
}
})();
(function(){
var div = document.createElement("div");
div.innerHTML = "<div class='test e'></div><div class='test'></div>";
// Opera can't find a second classname (in 9.6)
// Also, make sure that getElementsByClassName actually exists
if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) {
return;
}
// Safari caches class attributes, doesn't catch changes (in 3.2)
div.lastChild.className = "e";
if ( div.getElementsByClassName("e").length === 1 ) {
return;
}
Expr.order.splice(1, 0, "CLASS");
Expr.find.CLASS = function( match, context, isXML ) {
if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) {
return context.getElementsByClassName(match[1]);
}
};
// release memory in IE
div = null;
})();
function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
for ( var i = 0, l = checkSet.length; i < l; i++ ) {
var elem = checkSet[i];
if ( elem ) {
var match = false;
elem = elem[dir];
while ( elem ) {
if ( elem[ expando ] === doneName ) {
match = checkSet[elem.sizset];
break;
}
if ( elem.nodeType === 1 && !isXML ){
elem[ expando ] = doneName;
elem.sizset = i;
}
if ( elem.nodeName.toLowerCase() === cur ) {
match = elem;
break;
}
elem = elem[dir];
}
checkSet[i] = match;
}
}
}
function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
for ( var i = 0, l = checkSet.length; i < l; i++ ) {
var elem = checkSet[i];
if ( elem ) {
var match = false;
elem = elem[dir];
while ( elem ) {
if ( elem[ expando ] === doneName ) {
match = checkSet[elem.sizset];
break;
}
if ( elem.nodeType === 1 ) {
if ( !isXML ) {
elem[ expando ] = doneName;
elem.sizset = i;
}
if ( typeof cur !== "string" ) {
if ( elem === cur ) {
match = true;
break;
}
} else if ( Sizzle.filter( cur, [elem] ).length > 0 ) {
match = elem;
break;
}
}
elem = elem[dir];
}
checkSet[i] = match;
}
}
}
if ( document.documentElement.contains ) {
Sizzle.contains = function( a, b ) {
return a !== b && (a.contains ? a.contains(b) : true);
};
} else if ( document.documentElement.compareDocumentPosition ) {
Sizzle.contains = function( a, b ) {
return !!(a.compareDocumentPosition(b) & 16);
};
} else {
Sizzle.contains = function() {
return false;
};
}
Sizzle.isXML = function( elem ) {
// documentElement is verified for cases where it doesn't yet exist
// (such as loading iframes in IE - #4833)
var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement;
return documentElement ? documentElement.nodeName !== "HTML" : false;
};
var posProcess = function( selector, context, seed ) {
var match,
tmpSet = [],
later = "",
root = context.nodeType ? [context] : context;
// Position selectors must be done after the filter
// And so must :not(positional) so we move all PSEUDOs to the end
while ( (match = Expr.match.PSEUDO.exec( selector )) ) {
later += match[0];
selector = selector.replace( Expr.match.PSEUDO, "" );
}
selector = Expr.relative[selector] ? selector + "*" : selector;
for ( var i = 0, l = root.length; i < l; i++ ) {
Sizzle( selector, root[i], tmpSet, seed );
}
return Sizzle.filter( later, tmpSet );
};
// EXPOSE
window.tinymce.dom.Sizzle = Sizzle;
})();
// #endif<|fim▁end|> |
// We ended someplace up the tree so do a sibling check
return i === al ?
siblingCheck( a, bp[i], -1 ) : |
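// Usage sketch (illustrative): once exposed as window.tinymce.dom.Sizzle, the
// engine is called with a selector plus optional context/results/seed; someNode
// below is an assumed DOM element.
var sizzle = window.tinymce.dom.Sizzle;
var intros = sizzle("p.intro", document);                  // array of matching elements
var isLink = sizzle.matchesSelector(someNode, "a[href]");  // boolean membership test
sizzle.uniqueSort(intros);                                 // document-order sort + de-duplication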
<|file_name|>authentication.py<|end_file_name|><|fim▁begin|># accounts/authentication.py
import requests
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
logger = logging.getLogger(__name__)
User = get_user_model()
PERSONA_VERIFY_URL = 'https://verifier.login.persona.org/verify'
#DOMAIN = 'localhost'
#DOMAIN = 'http://hotzenplotz.pythonanywhere.com'
class PersonaAuthenticationBackend(object):
def authenticate(self, assertion):
logging.warning('entering authenticate function')
response = requests.post(
PERSONA_VERIFY_URL,
data = {'assertion': assertion, 'audience': settings.DOMAIN}
)
logging.warning('got response from persona')<|fim▁hole|> try:
return User.objects.get(email=email)
except User.DoesNotExist:
return User.objects.create(email=email)
else:
logger.warning(
'Persona says no. Json was: {}'.format(response.json())
)
def get_user(self, email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None<|fim▁end|> | logging.warning(response.content.decode())
if response.ok and response.json()['status'] == 'okay':
email = response.json()['email'] |
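# Wiring sketch (illustrative): a backend like this is normally listed in
# settings and driven through django.contrib.auth; the assertion value comes
# from the browser-side Persona flow.
#
#   # settings.py
#   AUTHENTICATION_BACKENDS = ['accounts.authentication.PersonaAuthenticationBackend']
#   DOMAIN = 'localhost'
#
#   # view code
#   from django.contrib.auth import authenticate, login
#   user = authenticate(assertion=request.POST['assertion'])
#   if user is not None:
#       login(request, user)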
<|file_name|>bitrate_prober.cc<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/pacing/bitrate_prober.h"
#include <assert.h>
#include <limits>
#include <sstream>
#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
namespace {
int ComputeDeltaFromBitrate(size_t packet_size, int bitrate_bps) {
assert(bitrate_bps > 0);<|fim▁hole|> return static_cast<int>(1000ll * static_cast<int64_t>(packet_size) * 8ll /
bitrate_bps);
}
} // namespace
BitrateProber::BitrateProber()
: probing_state_(kDisabled),
packet_size_last_send_(0),
time_last_send_ms_(-1) {
}
void BitrateProber::SetEnabled(bool enable) {
if (enable) {
if (probing_state_ == kDisabled) {
probing_state_ = kAllowedToProbe;
LOG(LS_INFO) << "Initial bandwidth probing enabled";
}
} else {
probing_state_ = kDisabled;
LOG(LS_INFO) << "Initial bandwidth probing disabled";
}
}
bool BitrateProber::IsProbing() const {
return probing_state_ == kProbing;
}
void BitrateProber::MaybeInitializeProbe(int bitrate_bps) {
if (probing_state_ != kAllowedToProbe)
return;
probe_bitrates_.clear();
// Max number of probe clusters; each cluster sends kPacketsPerProbe packets.
const int kMaxNumProbes = 2;
const int kPacketsPerProbe = 5;
const float kProbeBitrateMultipliers[kMaxNumProbes] = {3, 6};
int bitrates_bps[kMaxNumProbes];
std::stringstream bitrate_log;
bitrate_log << "Start probing for bandwidth, bitrates:";
for (int i = 0; i < kMaxNumProbes; ++i) {
bitrates_bps[i] = kProbeBitrateMultipliers[i] * bitrate_bps;
bitrate_log << " " << bitrates_bps[i];
// We need one extra to get 5 deltas for the first probe.
if (i == 0)
probe_bitrates_.push_back(bitrates_bps[i]);
for (int j = 0; j < kPacketsPerProbe; ++j)
probe_bitrates_.push_back(bitrates_bps[i]);
}
bitrate_log << ", num packets: " << probe_bitrates_.size();
LOG(LS_INFO) << bitrate_log.str().c_str();
probing_state_ = kProbing;
}
int BitrateProber::TimeUntilNextProbe(int64_t now_ms) {
if (probing_state_ != kDisabled && probe_bitrates_.empty()) {
probing_state_ = kWait;
}
if (probe_bitrates_.empty()) {
// No probe started, or waiting for next probe.
return -1;
}
int64_t elapsed_time_ms = now_ms - time_last_send_ms_;
// We will send the first probe packet immediately if no packet has been
// sent before.
int time_until_probe_ms = 0;
if (packet_size_last_send_ > 0 && probing_state_ == kProbing) {
int next_delta_ms = ComputeDeltaFromBitrate(packet_size_last_send_,
probe_bitrates_.front());
time_until_probe_ms = next_delta_ms - elapsed_time_ms;
// There is no point in trying to probe with less than 1 ms between packets
// as it essentially means trying to probe at infinite bandwidth.
const int kMinProbeDeltaMs = 1;
// If we have waited more than 3 ms for a new packet to probe with we will
// consider this probing session over.
const int kMaxProbeDelayMs = 3;
if (next_delta_ms < kMinProbeDeltaMs ||
time_until_probe_ms < -kMaxProbeDelayMs) {
// We currently disable probing after the first probe, as we only want
// to probe at the beginning of a connection. We should set this to
// kWait if we later want to probe periodically.
probing_state_ = kWait;
LOG(LS_INFO) << "Next delta too small, stop probing.";
time_until_probe_ms = 0;
}
}
return time_until_probe_ms;
}
void BitrateProber::PacketSent(int64_t now_ms, size_t packet_size) {
assert(packet_size > 0);
packet_size_last_send_ = packet_size;
time_last_send_ms_ = now_ms;
if (probing_state_ != kProbing)
return;
if (!probe_bitrates_.empty())
probe_bitrates_.pop_front();
}
} // namespace webrtc<|fim▁end|> | // Compute the time delta needed to send packet_size bytes at bitrate_bps
// bps. Result is in milliseconds. |
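// Worked example (illustrative): a 1200-byte packet at a 300000 bps probe
// bitrate yields 1000 * 1200 * 8 / 300000 = 32 ms between packet sends.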
<|file_name|>CommandInputRecordField.ts<|end_file_name|><|fim▁begin|>import {CommandLineBinding} from "./CommandLineBinding";
import {
CommandInputArraySchema,
CommandInputEnumSchema,
CommandInputMapSchema,
CommandInputRecordSchema,
CommandInputSchema
} from "./CommandInputSchema";
import {Datatype} from "./Datatype";
export interface CommandInputRecordField {
name: string;
type?: Datatype | CommandInputSchema | CommandInputArraySchema | CommandInputMapSchema
| CommandInputEnumSchema | CommandInputRecordSchema | string | Array<Datatype
| CommandInputSchema | CommandInputArraySchema | CommandInputMapSchema
| CommandInputEnumSchema | CommandInputRecordSchema | string>;<|fim▁hole|> inputBinding?: CommandLineBinding;
description?: string;
label?: string;
}<|fim▁end|> | |
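// Usage sketch (illustrative): the type union accepts plain strings, so a
// minimal field literal needs only a name; "int" and the label are examples.
const threadsField: CommandInputRecordField = {
    name: "threads",
    type: "int",           // string member of the union type
    label: "Thread count"
};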
<|file_name|>event_event.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.addons.http_routing.models.ir_http import slug
class EventEvent(models.Model):
_inherit = "event.event"
community_menu = fields.Boolean(
"Community Menu", compute="_compute_community_menu",
readonly=False, store=True,
help="Display community tab on website")
community_menu_ids = fields.One2many(
"website.event.menu", "event_id", string="Event Community Menus",
domain=[("menu_type", "=", "community")])
@api.depends("event_type_id", "website_menu", "community_menu")
def _compute_community_menu(self):<|fim▁hole|> """ At type onchange: synchronize. At website_menu update: synchronize. """
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.community_menu = event.event_type_id.community_menu
elif event.website_menu and event.website_menu != event._origin.website_menu or not event.community_menu:
event.community_menu = True
elif not event.website_menu:
event.community_menu = False
# ------------------------------------------------------------
# WEBSITE MENU MANAGEMENT
# ------------------------------------------------------------
# OVERRIDES: ADD SEQUENCE
def _get_menu_update_fields(self):
update_fields = super(EventEvent, self)._get_menu_update_fields()
update_fields += ['community_menu']
return update_fields
def _update_website_menus(self, menus_update_by_field=None):
super(EventEvent, self)._update_website_menus(menus_update_by_field=menus_update_by_field)
for event in self:
if event.menu_id and (not menus_update_by_field or event in menus_update_by_field.get('community_menu')):
event._update_website_menu_entry('community_menu', 'community_menu_ids', '_get_community_menu_entries')
def _get_menu_type_field_matching(self):
res = super(EventEvent, self)._get_menu_type_field_matching()
res['community'] = 'community_menu'
return res
def _get_community_menu_entries(self):
self.ensure_one()
return [(_('Community'), '/event/%s/community' % slug(self), False, 80, 'community')]
def _get_track_menu_entries(self):
""" Remove agenda as this is now managed separately """
self.ensure_one()
return [
(_('Talks'), '/event/%s/track' % slug(self), False, 10, 'track'),
(_('Agenda'), '/event/%s/agenda' % slug(self), False, 70, 'track')
]
def _get_track_proposal_menu_entries(self):
""" See website_event_track._get_track_menu_entries() """
self.ensure_one()
return [(_('Talk Proposals'), '/event/%s/track_proposal' % slug(self), False, 15, 'track_proposal')]<|fim▁end|> | |
<|file_name|>insertion_sort.py<|end_file_name|><|fim▁begin|># _*_ encoding: utf-8 _*_
import timeit
def insertion_sort(nums):
"""Insertion Sort."""
for index in range(1, len(nums)):
val = nums[index]
left_index = index - 1
while left_index >= 0 and nums[left_index] > val:
nums[left_index + 1] = nums[left_index]
left_index -= 1
nums[left_index + 1] = val
return nums
<|fim▁hole|> for index in range(1, len(nums)):
val = nums[index]
left_index = index - 1
while left_index >= 0 and nums[left_index][1] > val[1]:
nums[left_index + 1] = nums[left_index]
left_index -= 1
nums[left_index + 1] = val
return nums
if __name__ == '__main__':
print("""
The insertion sort algorithm walks the list from left to right and, for each item,
shifts the larger items before it one position to the right until the item can be inserted in sorted order.
Here are the best and worst case scenarios:
Input (Worst Case Scenario):
lst_one = [x for x in range(0, 2000)]
lst_one.reverse()
""")
lst_one = [x for x in range(0, 2000)]
lst_one.reverse()
time1 = timeit.timeit('insertion_sort(lst_one)', setup="from __main__ import insertion_sort, lst_one",number=500)
print("""
Number of runs = 500
Average Time = {}
Input (Best Case Scenario):
lst_two = [x for x in range(0, 2000)]
""".format(time1))
lst_two = [x for x in range(0, 2000)]
time2 = timeit.timeit('insertion_sort(lst_two)', setup="from __main__ import insertion_sort, lst_two",number=500)
print("""
Number of runs = 500
Average Time = {}
""".format(time2))<|fim▁end|> | def insertion_sort_tuples(nums):
"""Insertion Sort.""" |
<|file_name|>luoo_Download.py<|end_file_name|><|fim▁begin|>import urllib2
import os
import re<|fim▁hole|>print 'version: v1.0'
print 'author: ChenYao'
print 'date: 2013/9/4'
print 'Instructions: the music will be downloaded to path D:\\Luoo.net\\'
print '---------------------------------------------------------------'
headers = {'Referer':'http://www.luoo.net/'}
charInSongName = ['?', '!' , '\\' , '/' , '#' ,'%' , '*', '^' , '~']
sourUrl = 'http://www.luoo.net/radio/radio'
rawPath = "d:\\Luoo.net\\"
coverRaw = 'http://www.luoo.net/wp-content/uploads/'
htmlRaw = 'http://www.luoo.net/'
#sourUrl_296 = 'http://www.luoo.net/radio/radio296/mp3.xml'
#sourUrl_1 = 'http://www.luoo.net/radio/radio1/mp3player.xml'
#To do : file name
luoo = file('luoo.txt')
li = luoo.readlines()
# request mp3 jpg ...
def requestResource(sourcePath, url):
# file does not exist, download it
if (os.path.isfile(sourcePath) == False):
timeStart = time.time()
req = urllib2.Request(
url = url,
headers = headers
)
try:
# catch the exception, example HTTP_404
response = urllib2.urlopen(req, timeout = 10)
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
else:
# write to the file
with open(sourcePath, "wb") as code:
code.write(response.read())
# print the download time
timeEnd = time.time()
spendTime = time.strftime('%M:%S',time.localtime(timeEnd - timeStart))
print '### Download Time: [%s]' % spendTime
# file exist
elif(os.path.isfile(sourcePath)):
# then check whether the file is empty
if((os.path.getsize(sourcePath)) == 0L):
# Empty, then remove it and retry
os.remove(sourcePath)
requestResource(sourcePath, url)
# file exist and is not empty
else:
print "### file already exist!!! "
pass
# print the download detail information
def print_info(songName, fileNum):
print '### Downloading >>> [%s].'%fileNum + songName
# remove the special char in songName
def removeChar(songName):
for i in charInSongName:
songName = songName.replace(i,' ')
return songName
# start download
def download(start,end):
for x in range(start,end):
startTime = time.time()
if x < 296:
Url = sourUrl + str(x) +'/mp3player.xml'
else:
Url = sourUrl + str(x) +'/mp3.xml'
folderPath = rawPath + 'Luoo_' + str(x) + '\\'
#folderPath = rawPath + li[x-2].rstrip() + '\\'
# create the download folder if it does not already exist
if os.path.isdir(folderPath):
pass
else:
os.mkdir(folderPath)
# read the xml
lines = urllib2.urlopen(Url, timeout = 10 ).readlines()
# total songs
songs = len(lines) - 3
print '****************'
print 'Radio: radio' + str(x)
print 'Total: ' + str(songs) + ' songs'
print '****************'
print('----------------------------------')
# Download the cover
coverUrl = coverRaw + str(x) + '.jpg'
coverPath = folderPath + 'cover.jpg'
print '### Downloading >>> Cover.jpg'
requestResource(coverPath,coverUrl)
# Download the HTML
htmlUrl = htmlRaw + str(x)
htmlPath = folderPath + 'VOL.' + str(x) + '.html'
print '### Downloading >>> HTML'
requestResource(htmlPath,htmlUrl)
print('----------------------------------')
print '------------------------------------------------------'
fileNum = 1
for line in lines[2:-1]:
line = line.strip()
a = re.findall(r'http.*.mp3',line)
if a == []:
continue
realUrl = str(a[0])
b = re.findall(r'(?<=title=").*(?="\s*/)',line)
if b == []:
continue
songName = str(b[0]).decode('utf-8')
songName = removeChar(songName)
print_info(songName,fileNum)
# Download mp3
musicUrl = realUrl
musicPath = folderPath + songName + ".mp3"
requestResource(musicPath,musicUrl)
fileNum += 1
print '------------------------------------------------------'
lines = []
endTime = time.time()
date = time.strftime('%Y/%m/%d %H:%M:%S',time.localtime(time.time()))
during = time.strftime('%M:%S',time.localtime(endTime - startTime))
print('----------------------------------')
print "### Total time: " , during
print "### Date: " , date
print('----------------------------------')
print('\n')
if __name__ == '__main__':
startNum = input('input the start number(start from 1): ')
endNum = input('input the end(end with the last Vol): ')
if(os.path.isdir(rawPath) == False):
os.mkdir(rawPath)
download(startNum,endNum)<|fim▁end|> | import time
print '---------------------------------------------------------------'
print 'Name: Luoo.net-Mp3 crawler ' |
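# Retry sketch (illustrative, not part of the original script): the empty-file
# branch in requestResource() retries a bad download exactly once (remove, then
# recurse into the not-exists branch); a second empty response is silently kept.
# A capped loop keeps retrying until the file is non-empty or attempts run out.
def requestResourceCapped(sourcePath, url, attempts=3):
    for _ in range(attempts):
        if os.path.isfile(sourcePath) and os.path.getsize(sourcePath) > 0:
            return True                       # already downloaded
        if os.path.isfile(sourcePath):
            os.remove(sourcePath)             # discard an empty earlier attempt
        try:
            response = urllib2.urlopen(
                urllib2.Request(url = url, headers = headers), timeout = 10)
        except urllib2.URLError:
            continue                          # network error; try again
        with open(sourcePath, "wb") as code:
            code.write(response.read())
    return os.path.isfile(sourcePath) and os.path.getsize(sourcePath) > 0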
<|file_name|>clone_rope.rs<|end_file_name|><|fim▁begin|>extern crate ropey;
use std::iter::Iterator;
use ropey::Rope;
const TEXT: &str = include_str!("test_text.txt");
#[test]
fn clone_rope() {
let mut rope1 = Rope::from_str(TEXT);
let mut rope2 = rope1.clone();<|fim▁hole|> rope1.insert(432, "Hello ");
rope1.insert(2345, "world! ");
rope1.insert(5256, "How are ");
rope1.insert(53, "you ");
rope1.insert(768, "doing?\r\n");
rope2.insert(432, "Hello ");
rope2.insert(2345, "world! ");
rope2.insert(5256, "How are ");
rope2.insert(53, "you ");
rope2.insert(768, "doing?\r\n");
// Make sure they match
let matches = Iterator::zip(rope1.chars(), rope2.chars())
.map(|(a, b)| a == b)
.all(|n| n);
assert_eq!(matches, true);
// Insert something into the clone, and make sure they don't match
// afterwards.
rope2.insert(3891, "I'm doing fine, thanks!");
let matches = Iterator::zip(rope1.chars(), rope2.chars())
.map(|(a, b)| a == b)
.all(|n| n);
assert_eq!(matches, false);
}<|fim▁end|> |
// Do identical insertions into both ropes |
<|file_name|>test_wsgi_breakpad_collector.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import StringIO
import gzip
import web
import mock
from nose.tools import eq_, ok_
from datetime import datetime
from contextlib import closing
from configman.dotdict import DotDict
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.collector.throttler import ACCEPT, IGNORE, DEFER
from socorro.unittest.testbase import TestCase
class ObjectWithValue(object):
def __init__(self, v):
self.value = v
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.throttler = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.collector.accept_submitted_legacy_processing = False
config.collector.checksum_method = hashlib.md5
config.crash_storage = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
c = BreakpadCollector(config)
eq_(c.config, config)
eq_(c.logger, config.logger)
eq_(c.throttler, config.throttler)
eq_(c.crash_storage, config.crash_storage)
eq_(c.dump_id_prefix, 'bp-')
eq_(c.dump_field, 'dump')
def test_make_raw_crash(self):
config = self.get_standard_config()
form = DotDict()
form.ProductName = 'FireSquid'
form.Version = '99'
form.dump = 'fake dump'
form.some_field = '\x0023'
form.some_other_field = ObjectWithValue('XYZ')
class BreakpadCollectorWithMyForm(config.collector.collector_class):
def _form_as_mapping(self):
return form
c = BreakpadCollectorWithMyForm(config)
rc, dmp = c._get_raw_crash_from_form()
eq_(rc.ProductName, 'FireSquid')<|fim▁hole|> @mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST(self, mocked_web, mocked_webapi, mocked_utc_now, mocked_time):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = '\x00FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_reject_browser_with_hangid(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform[u'\u0000ProductName'] = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.HangID = 'xyz'
rawform.ProcessType = 'browser'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.throttle_rate = None
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc = dict(erc)
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (IGNORE, None)
r = c.POST()
eq_(r, "Unsupported=1\n")
ok_(not
c.crash_storage.save_raw_crash.call_count
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3cx\x0042-47a5-843f-a0f892140107'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
rawform.dump_checksums = "this is poised to overwrite and cause trouble"
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = u'1'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
config.collector.accept_submitted_legacy_processing = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99\x00'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform[u'some_field\u0000'] = '23'
rawform[u'some_\u0000other_field'] = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.ctx')
def test_POST_with_gzip(
self,
mocked_web_ctx,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
form = """
--socorro1234567
Content-Disposition: form-data; name="ProductName"
FireSquid
--socorro1234567
Content-Disposition: form-data; name="Version"
99
--socorro1234567
Content-Disposition: form-data; name="some_field"
23
--socorro1234567
Content-Disposition: form-data; name="some_other_field"
XYZ
--socorro1234567
Content-Disposition: form-data; name="dump"; filename="dump"
Content-Type: application/octet-stream
fake dump
--socorro1234567
Content-Disposition: form-data; name="aux_dump"; filename="aux_dump"
Content-Type: application/octet-stream
aux_dump contents
"""
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
with closing(StringIO.StringIO()) as s:
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(form)
g.close()
gzipped_form = s.getvalue()
mocked_webapi.data.return_value = gzipped_form
mocked_web_ctx.configure_mock(
env={
'HTTP_CONTENT_ENCODING': 'gzip',
'CONTENT_ENCODING': 'gzip',
'CONTENT_TYPE':
'multipart/form-data; boundary="socorro1234567"',
'REQUEST_METHOD': 'POST'
}
)
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)<|fim▁end|> | eq_(rc.Version, '99')
eq_(rc.some_field, '23')
eq_(rc.some_other_field, 'XYZ')
|
<|file_name|>test_convert.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, unicode_literals<|fim▁hole|>import io
from glob import glob
from psd_tools import PSDImage
from psd2svg import psd2svg
FIXTURES = [
p for p in glob(
os.path.join(os.path.dirname(__file__), 'fixtures', '*.psd'))
]
@pytest.mark.parametrize('psd_file', FIXTURES)
def test_convert(tmpdir, psd_file):
psd2svg(psd_file, tmpdir.dirname)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_io(tmpdir, psd_file):
with open(psd_file, "rb") as f:
assert isinstance(psd2svg(f), str)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_psd(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
psd2svg(psd)
@pytest.mark.parametrize('psd_file', FIXTURES[2:3])
def test_input_layer(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
assert psd2svg(psd[0]).startswith("<")
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_output_io(tmpdir, psd_file):
with io.StringIO() as f:
assert f == psd2svg(psd_file, f)<|fim▁end|> |
from builtins import str
import os
import pytest |
<|file_name|>v1_namespace_spec.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1<|fim▁hole|>
from pprint import pformat
from six import iteritems
import re
class V1NamespaceSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, finalizers=None):
"""
V1NamespaceSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'finalizers': 'list[str]'
}
self.attribute_map = {
'finalizers': 'finalizers'
}
self._finalizers = finalizers
@property
def finalizers(self):
"""
Gets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
:return: The finalizers of this V1NamespaceSpec.
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""
Sets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers
:param finalizers: The finalizers of this V1NamespaceSpec.
:type: list[str]
"""
self._finalizers = finalizers
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other<|fim▁end|> |
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
|
<|file_name|>multiProjects.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
# Author: Damien Farrell 2011
"""This script will create multiple projects from csv files and
add pdbs based on the csv names. It can also create peatsa jobs
and merge them back into the database"""
import pickle, sys, os, copy, time, types, math
import numpy
from PEATDB.Base import PDatabase
from PEATDB import Utils
from PEATDB.Actions import DBActions
from PEATDB.plugins.PEATSAplugin import PEATSAPlugin
from PEATDB.plugins.Correlation import CorrelationAnalyser
from PEATDB.PEATTables import PEATTableModel
import PEATDB.Utils
from PEATDB.Parsers import PDBParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import stats
#plt.rc('text',usetex=True)
plt.rc('font',size=7)
plt.rc('legend',fontsize=6)
plt.rc('savefig',dpi=300)
plt.rc('axes',linewidth=.5)
settings={'server':'enzyme.ucd.ie','username':'guest',
'password':'123'}
#path = '/home/people/farrell/Desktop/SADBPaperData'
path = os.getcwd()
savepath = os.path.join(path,'projects')
cpath = os.path.join(path,'data')
if not os.path.exists(cpath):
print 'we need a folder called data in the current path'
csvfiles = os.listdir(cpath)
dbnames = [os.path.splitext(i)[0] for i in csvfiles]
def PEATSAJobs(prjs, resubmit=False):
"""Submit PEATSA runs for all projects or merge results if done"""
for name in prjs:
print name
DB = PDatabase(local=os.path.join(savepath,name))
pdb = DB['wt'].Structure
PS = PEATSAPlugin()
PS.main(DB=DB)
if hasattr(DB.meta,'peatsa_jobs') and resubmit == False:
if 'mycalc' in DB.meta.peatsa_jobs:
print 'job is present'
#try to merge results
S = PEATTableModel(DB)
job,n = PS.getJob('mycalc')
PS.mergeResults(job, 'prediction', S)
DB.commit()
print 'merged results'
else:
mutlist = []
for p in DB.getRecs():
mutlist.append(DB.get(p).Mutations)
#print mutlist
pdbfile = PS.writetempPDB()
#we add source project data so exp data can be read from summary
prjdata = {'server':'enzyme.ucd.ie','username':'guest',
'project':name,'password':'123','port':'8080'}
PS.submitJob(name='mycalc', pdbname=DB.meta.refprotein, pdbfile=pdbfile,
mutations=mutlist, calcs=['stability'],
meta={'protein':name,'expcol':'Exp','project':prjdata})
#required to end process
PS.jobManager.stopLogging()
DB.close()
return
def createProjects(files):
"""Create multiple projects at once from csv files"""
for filename in files:
print filename
name = os.path.splitext(filename)[0]
#create/open db
DB = PDatabase(local=os.path.join(savepath,name))
DB.add('wt')
#add wt pdb
stream = DBActions.fetchPDB(name)
DBActions.addPDBFile(DB, 'wt', pdbdata=stream, pdbname=name, gui=False)<|fim▁hole|> #import data from csv
DB.importCSV(os.path.join(cpath,filename), namefield='Mutations')
print 'imported ok'
DB.deleteField('PDB')
DB.commit()
DB.close()
print 'done'
return
def summarise(projects):
summDB = PDatabase(local='summary.fs')
C = CorrelationAnalyser()
figs = []
for f in range(4):
figs.append(plt.figure())
gs = gridspec.GridSpec(5, 5, wspace=0.3, hspace=0.5)
i=0
data=[]
print 'processing %s projects' %len(projects)
for p in projects:
print 'structure:',p
DB = PDatabase(local=os.path.join(savepath,p))
S = PEATTableModel(DB)
try:
exp,pre = S.getColumns(['Exp','prediction'],allowempty=False)
errs = [j[0]-j[1] for j in zip(exp,pre)]
except:
print 'no results'
continue
#DB.close()
#add link to proj
summDB.add(p)
summDB.addField('project',fieldtype='Project')
summDB[p]['project'] = {'server':'enzyme.ucd.ie','username':'guest',
'project':p,'password':'123','port':'8080'}
print summDB.isChanged()
#stats
cc,rmse,meanerr = C.getStats(pre,exp)
#ttest for mean errs 0
ttp = round(stats.ttest_1samp(errs, 0)[1],2)
#normality of errs
w,swp = C.ShapiroWilk(errs)
x={'name':p,'mutants':len(pre),'rmse':rmse,'corrcoef':cc,'meanerr':meanerr,
'ttest':ttp,'shapirowilk':swp}
'''ax = figs[0].add_subplot(gs[0, i])
C.plotCorrelation(pre,exp,title=p,ms=2,axeslabels=False,ax=ax)
ax = figs[1].add_subplot(gs[0, i])
C.showHistogram([pre,exp],title=p,labels=['pre','exp'],ax=ax)
ax = figs[2].add_subplot(gs[0, i])
C.plotNorm(errs,title=p,lw=1,ax=ax)
#qqplot
ax = figs[3].add_subplot(gs[0, i])
C.QQplot(errs,title=p,ax=ax)'''
#get PDB info
parser = PDBParser()
descr = parser.getDescription(p)
x.update(descr)
data.append(x)
i+=1
summDB.importDict(data)
print summDB.isChanged()
summDB.commit()
#add all peatsa jobs to summary proj also
'''print 'adding peatsa job info'
PS = PEATSAPlugin()
PS.main(DB=summDB)
#summDB.meta.peatsa_jobs = None
#from ZODB.PersistentMapping import PersistentMapping
#summDB.meta.peatsa_jobs = PersistentMapping()
PS.checkJobsDict()
PS.jobManager.stopLogging()
for p in projects:
#print summDB.meta
DB = PDatabase(local=os.path.join(savepath,p))
job = DB.meta.peatsa_jobs['mycalc']
summDB.meta.peatsa_jobs[p] = job
print job
#DB.close()
print summDB.isChanged()
print summDB.meta.peatsa_jobs
summDB.commit()'''
#for i in range(len(figs)):
# figs[i].savefig('fig%s.png' %i)
#plt.show()
return
def info(projects):
"""Just return info in current projects"""
total=0
summDB = PDatabase(local='summary.fs')
for p in projects:
DB = PDatabase(local=os.path.join(savepath,p))
l = DB.length()
total += l
print '%s has %s records' %(p,l)
if p not in summDB.getRecs():
print 'not present in summary project'
print '-----------------------'
print 'info on %s projects' %len(projects)
print 'with total of %s records' %total
print '%s mutants' %(total-len(projects))
return
def findOutliers(data):
"""Outliers in all corr data"""
C = CorrelationAnalyser()
return C
def send2Server(projects):
"""Send all projects to remote versions"""
settings={'server':'enzyme.ucd.ie','username':'guest',
'password':'123','port':8080}
adminsettings={'host':'enzyme.ucd.ie','user':'peatadmin',
'passwd':'nielsen','port':8080}
'''for p in projects:
print p
DB = PDatabase(local=os.path.join(savepath,p))
Utils.createDBonServer(prj=p,settings=adminsettings,
access='guest')
Utils.copyDBtoServer(DB,p,settings)'''
DB = PDatabase(local='summary.fs')
Utils.copyDBtoServer(DB,'PotapovDataset',settings)
return
def summarystats(projects):
"""summary stats"""
for p in projects:
DB = PDatabase(local=os.path.join(savepath,p))
# c = len(DB.getRecs())
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--importcsv", dest="importcsv", action='store_true',
help="create/import", default=False)
parser.add_option("-j", "--jobs", dest="jobs", action='store_true',
help="submit/merge jobs", default=False)
parser.add_option("-s", "--summary", dest="summary", action='store_true',
help="do summary/stats", default=False)
parser.add_option("-p", "--path", dest="path",
help="Path with csv files")
parser.add_option("-c", "--copy", dest="copy",action='store_true',
help="copy to server", default=False)
parser.add_option("-o", "--info", dest="info",action='store_true',
help="get info", default=False)
opts, remainder = parser.parse_args()
if opts.path != None:
print opts.path
if opts.importcsv == True:
createProjects(csvfiles)
if opts.jobs == True:
PEATSAJobs(['1bvc'])
#PEATSAJobs(dbnames, resubmit=False)
if opts.summary == True:
summarise(dbnames)
#summarise(['1wq5'])
if opts.copy == True:
send2Server(dbnames)
if opts.info == True:
info(dbnames)<|fim▁end|> | DB.meta.refprotein = 'wt'
DB.meta.info['protein'] = name |
<|file_name|>apply_update.js<|end_file_name|><|fim▁begin|>import { includes } from './array_proxy'
/**
* Given a record and an update object, apply the update on the record. Note
* that the `operate` object is unapplied.
*
* @param {Object} record
* @param {Object} update
*/
export default function applyUpdate (record, update) {
for (let field in update.replace)
record[field] = update.replace[field]
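// "push" appends one value or an array of values, creating the array field if it is missing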
for (let field in update.push) {
const value = update.push[field]
record[field] = record[field] ? record[field].slice() : []
if (Array.isArray(value)) record[field].push(...value)
else record[field].push(value)
}
for (let field in update.pull) {
const value = update.pull[field]
record[field] = record[field] ?
record[field].slice().filter(exclude.bind(null,
Array.isArray(value) ? value : [ value ])) : []
}
}
<|fim▁hole|>}<|fim▁end|> | function exclude (values, value) {
return !includes(values, value) |
<|file_name|>parrot_utils.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2012-2014 The Paparazzi Team
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import socket
import telnetlib
import sys
from ftplib import FTP
import ftplib
# Check if IP is valid
def is_ip(address):
try:
socket.inet_aton(address)
ip = True
except socket.error:
ip = False
return ip
# Helper function
def split_into_path_and_file(name):
if name.count('/') <= 0:
return ["./", name]
return name.rsplit('/', 1)
# Execute a command
def execute_command(tn, command):
tn.write(command + '\n')
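# drop the echoed command (plus line break) at the front and the trailing prompt ("\r\n# ") at the back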
return tn.read_until('# ')[len(command) + 2:-4]
# Check the version
def check_version(tn, directory):<|fim▁hole|>
# Check what is currently running on the drone
def check_running(tn):
ps_aux = execute_command(tn, 'ps')
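# scan the process list for known firmware and autopilot binaries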
running = ""
if 'program.elf' in ps_aux:
running += ' Native (program.elf),'
if 'dragon-prog' in ps_aux:
running += ' Native (dragon-prog),'
if 'ap.elf' in ps_aux:
running += ' Paparazzi (ap.elf),'
if 'gst-launch' in ps_aux:
running += ' GStreamer (gst-launch)'
return running[1:]
# Check the filesystem
def check_filesystem(tn):
return execute_command(tn, 'df -h')
# Reboot the drone
def reboot(tn):
execute_command(tn, 'reboot')
# Upload ftp and catch memory-full error
def uploadfile(ftp, filename, content):
try:
ftp.storbinary("STOR " + filename, content)
except ftplib.error_temp:
print("FTP UPLOAD ERROR: Uploading FAILED: Probably your ARDrone memory is full.")
sys.exit()
except:
print("FTP UPLOAD ERROR: Maybe your ARDrone memory is full?", sys.exc_info()[0])
sys.exit()
# Connect with telnet and ftp, wait until login
def connect(host):
try:
tn = telnetlib.Telnet(host, timeout=3)
ftp = FTP(host)
ftp.login()
tn.read_until('# ')
return tn, ftp
except:
print('Could not connect to Parrot UAV (host: ' + host + ')')
exit(2)
# Close the telnet and ftp
def disconnect(tn, ftp):
tn.close()
ftp.close()<|fim▁end|> | return execute_command(tn, 'cat ' + directory + '/version.txt') |
<|file_name|>interpreterutils.rs<|end_file_name|><|fim▁begin|>use bytecodes::bytecode;
use context::Context;
use stack::StackEntry;
use constants;
use exceptions::{throw_exception, throw_exception_from_interpretererror, InterpreterException};
use traits::{BufferAccessor, HasType};
// macro allowing to simplify null reference check
#[macro_export]
macro_rules! check_null_reference {
($variable: ident, $ctx: ident) => {
if !$variable.is_of_type(constants::PrimitiveType::REFERENCE)
|| $variable.value == constants::NULL_HANDLE
{
return throw_exception($ctx, InterpreterException::NullPointerException);
}
};
}
///
/// Manages aaload, baload, saload, iaload
///
pub fn xaload(
execution_context: &mut Context,
type_: constants::PrimitiveType,
) -> Result<(), InterpreterException> {
let arrayref = execution_context
.operand_stack
.pop_check_type(constants::PrimitiveType::REFERENCE)
.unwrap();
let index = execution_context
.operand_stack
.pop_check_type(constants::PrimitiveType::SHORT)
.unwrap();
check_null_reference!(arrayref, execution_context);
let associated_reference = execution_context
.object_manager
.get_object(arrayref.value as usize);
if let Ok(e) = associated_reference {
// consistency check to make sure it is an array
assert!(e.is_array());
// in case of arrays, the primitive type represents the type of its elements
assert!(e.is_of_type(type_));
match type_ {
// for shorts and references, we perform the same type of checks and
// fetch from the array identically (2 bytes at a time)
constants::PrimitiveType::SHORT | constants::PrimitiveType::REFERENCE => {
let size_one_entry = constants::REFERENCE_SIZE;
match e.read_s((index.value as usize) * size_one_entry) {
// REFERENCE_SIZE == SHORT_SIZE here
Ok(res) => {
execution_context
.operand_stack
.push(StackEntry::from_values(res, type_));
}
Err(e) => {
return throw_exception_from_interpretererror(execution_context, e);
}
}
}
// for bytes, each entry is one byte long
constants::PrimitiveType::BYTE => {
let size_one_entry = constants::BYTE_SIZE;
match e.read_b((index.value as usize) * size_one_entry) {
// retrieve the byte value from the array
Ok(res) => {
execution_context.operand_stack.bpush(res);
}
Err(e) => return throw_exception_from_interpretererror(execution_context, e),
}
}
// for integers, reads are performed 4 bytes at a time
constants::PrimitiveType::INTEGER => {
let size_one_entry = constants::INTEGER_SIZE;
match e.read_i((index.value as usize) * size_one_entry) {
// retrieve the integer value from the array
Ok(res) => {
execution_context.operand_stack.ipush(res);
}
Err(e) => return throw_exception_from_interpretererror(execution_context, e),
}
}
constants::PrimitiveType::UNKNOWN => {
panic!("Unknown type !");
}
}
} else {
return throw_exception(execution_context, associated_reference.err().unwrap());
}
Ok(())
}
///
/// Manages astore, sstore, istore and associated xstore_x (because index is passed as parameter)
///
pub fn xstore(execution_context: &mut Context, index: u8, type_: constants::PrimitiveType) {
match type_ {
// storing shorts and references follow the same pattern
constants::PrimitiveType::SHORT | constants::PrimitiveType::REFERENCE => {
// pop and check the type loaded from stack
let value_to_put = execution_context
.operand_stack
.pop_check_type(type_)
.unwrap();
//update local variable
execution_context
.current_frame_mut()
.unwrap()
.set_local(index as i16, value_to_put)
.unwrap();
}
// for integers, we pop and check 2 times on the stack
constants::PrimitiveType::INTEGER => {
let value_to_put1 = execution_context
.operand_stack
.pop_check_type(type_)
.unwrap();
let value_to_put2 = execution_context
.operand_stack
.pop_check_type(type_)
.unwrap();
// ... and we update 2 indexes in local variables stack
execution_context
.current_frame_mut()
.unwrap()<|fim▁hole|> .unwrap();
execution_context
.current_frame_mut()
.unwrap()
.set_local((index + 1) as i16, value_to_put2)
.unwrap();
}
_ => panic!("Unknown type"),
};
}
///
/// Manages aastore, sastore, iastore (the array store instructions)
/// Note: for aastore, some supplementary checks are performed to ensure consistency of the operation
/// See chapter 7.5.2 from JCVM specification for more details
///
pub fn xastore(
execution_context: &mut Context,
type_: constants::PrimitiveType,
) -> Result<(), InterpreterException> {
// in stack:
// array ref
// index
// value
// first, pop the array reference and check it is not null
let array_ref = execution_context
.operand_stack
.pop_check_type(constants::PrimitiveType::REFERENCE)
.unwrap();
check_null_reference!(array_ref, execution_context);
// make sure it is an array of the correct type
let array = execution_context
.object_manager
.get_object(array_ref.value as usize)
.unwrap();
// check that is is really an array
if !array.is_array() || !array.is_of_type(type_) {
return Err(InterpreterException::SecurityException);
}
let index = execution_context
.operand_stack
.pop_check_type(constants::PrimitiveType::SHORT)
.unwrap();
/*let value = execution_context
.operand_stack
.pop_check_type(type_)
.unwrap_or(return Err(InterpreterException::SecurityException));*/
Ok(())
}<|fim▁end|> | .set_local(index as i16, value_to_put1) |
<|file_name|>imcbackup.py<|end_file_name|><|fim▁begin|># Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains APIs to facilitate Imc backup and import
"""
import time
from ..imcexception import ImcValidationException
def backup_create(handle, remote_host, remote_file, protocol, username, password,
passphrase, timeout_in_sec=600, entity="CMC", **kwargs):
"""
backup_create helps create and download Imc backups.
Args:
handle (ImcHandle): Imc Connection handle
remote_host (str): IP or Hostname for the remote host.
remote_file (str): Absolute path and name for the backup file
protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
username (str) : Remote Host user name
password (str) : Remote Host user credentials/password
passphrase (str) : Password for the backup file.
timeout_in_sec (number) : time in seconds for which the method waits
for the backup file to generate before it exits.
entity (str): For C3260 platforms:
"CMC" for backup of chassis related configuration and state
"CIMC1" for backup of server-1 related configuration and state
"CIMC2" for backup of server-2 related configuration and state
kwargs : key=value paired arguments
Example:
remote_file = "/root/config_backup.xml"
backup_create(h,remote_file=remote_file,
protocol="ftp",username="user",password="pass",
remote_host="10.10.10.10",passphrase="xxxxxx")
backup_create(handle, remote_file="/users/xyz/backup",
remote_host="1.1.1.1", protocol="scp",
username="admin", password="password",
passphrase="passphrase", timeout_in_sec=600, entity="CMC")
"""
from ..mometa.mgmt.MgmtBackup import MgmtBackup, MgmtBackupConsts
from ..mometa.top.TopSystem import TopSystem
from ..mometa.equipment.EquipmentChassis import EquipmentChassis
from ..imccoreutils import IMC_PLATFORM
if password == "" or passphrase == "":
raise ImcValidationException("Invalid password or passphrase")
top_system = TopSystem()
parent_mo = None
mgmt_backup = None
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
parent_mo = top_system
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)
mgmt_backup = MgmtBackup(parent_mo_or_dn=parent_mo)<|fim▁hole|> mgmt_backup.remote_file = remote_file
mgmt_backup.user = username
mgmt_backup.pwd = password
mgmt_backup.passphrase = passphrase
mgmt_backup.proto = protocol
mgmt_backup.admin_state = MgmtBackupConsts.ADMIN_STATE_ENABLED
mgmt_backup.set_prop_multiple(**kwargs)
if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
mgmt_backup.entity = entity
handle.add_mo(mgmt_backup, modify_present=True)
# Checking for the backup to complete.
time.sleep(10)
duration = timeout_in_sec
poll_interval = 2
download_status = False
while not download_status:
mgmt_backup = handle.query_dn(dn=mgmt_backup.dn)
admin_state_temp = mgmt_backup.admin_state
# Break condition: if admin state is disabled then break
if admin_state_temp == MgmtBackupConsts.ADMIN_STATE_DISABLED:
if mgmt_backup.fsm_stage_descr == "Completed successfully":
download_status = True
if mgmt_backup.fsm_stage_descr == "Error":
raise ImcValidationException("Failed to export the CIMC "
"configuration file." +
"Error Code: " +
mgmt_backup.fsm_rmt_inv_err_code +
" Error Description: " +
mgmt_backup.fsm_rmt_inv_err_descr)
if download_status:
break
time.sleep(min(duration, poll_interval))
duration = max(0, (duration - poll_interval))
if duration == 0:
handle.remove_mo(mgmt_backup)
raise ImcValidationException('backup_create timed out')
def backup_import(handle, remote_host, remote_file, protocol, username,
password, passphrase, entity="CMC", **kwargs):
"""
This operation uploads an Imc backup taken earlier via GUI
or backup_create operation for all configuration, system configuration,
and logical configuration files. User can perform an import while the
system is up and running.
Args:
handle (ImcHandle): connection handle
remote_host (str): IP or Hostname for the remote host.
remote_file (str): Absolute path and name for the backup file
protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
username (str) : Remote Host user name
password (str) : Remote Host user credentials/password
passphrase (str) : Password for the backup file.
entity (str): For C3260 platforms:
"CMC" for importing chassis related configuration and state
"CIMC1" for importing server-1 related configuration and state
"CIMC2" for importing server-2 related configuration and state
kwargs : key=value paired arguments
Example:
remote_file = "/root/config_backup.xml"
backup_import(h,remote_file=remote_file,
protocol="ftp",username="user",password="pass",
remote_host="10.10.10.10",passphrase="xxxxxx")
backup_import(handle, remote_file="/users/xyz/backup",
remote_host="1.1.1.1", protocol="scp",
username="admin", password="password",
passphrase="passphrase", timeout_in_sec=600, entity="CMC")
"""
from ..mometa.top.TopSystem import TopSystem
from ..mometa.mgmt.MgmtImporter import MgmtImporter, MgmtImporterConsts
from ..mometa.equipment.EquipmentChassis import EquipmentChassis
from ..imccoreutils import IMC_PLATFORM
if password == "" or passphrase == "":
raise ImcValidationException("Invalid password or passphrase")
# create MgmtImporter
top_system = TopSystem()
parent_mo = None
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
parent_mo = top_system
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)
mgmt_importer = MgmtImporter(parent_mo_or_dn=parent_mo)
mgmt_importer.hostname = remote_host
mgmt_importer.remote_file = remote_file
mgmt_importer.proto = protocol
mgmt_importer.user = username
mgmt_importer.pwd = password
mgmt_importer.passphrase = passphrase
mgmt_importer.admin_state = MgmtImporterConsts.ADMIN_STATE_ENABLED
mgmt_importer.set_prop_multiple(**kwargs)
if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
mgmt_importer.entity = entity
handle.add_mo(mgmt_importer, modify_present=True)
time.sleep(10)
download_status = False
while not download_status:
mgmt_importer = handle.query_dn(dn=mgmt_importer.dn)
admin_state_temp = mgmt_importer.admin_state
# Break condition:- if state id disabled then break
if admin_state_temp == MgmtImporterConsts.ADMIN_STATE_DISABLED:
if mgmt_importer.fsm_stage_descr == "Completed successfully":
download_status = True
if mgmt_importer.fsm_stage_descr == "Error":
raise ImcValidationException(
"Failed to import the CIMC "
"configuration file." +
"Error Code: " +
mgmt_importer.fsm_rmt_inv_err_code +
" Error Description: " +
mgmt_importer.fsm_rmt_inv_err_descr)
if download_status:
break
return mgmt_importer<|fim▁end|> |
mgmt_backup.hostname = remote_host |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from .models import *
from django.contrib import admin
class PingLogAdmin(admin.ModelAdmin):
list_display = ('id','hash_key','url','ip_address','user_agent','time')<|fim▁hole|>
admin.site.register(PingLog, PingLogAdmin)<|fim▁end|> | |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
@override_settings(AUTHENTICATION_BACKENDS=
('django.contrib.auth.backends.ModelBackend',))
class StudentLoginLogoutTest(TestCase):
def setUp(self):
self.student = User(username='test')
self.student.set_password('test')
self.student.save()
self.client = Client()
def test_login_with_correct_info(self):
response = self.client.post(reverse('student_signin'),
{'username': 'test', 'password': 'test'})
self.assertRedirects(response, reverse('student_dashboard'))
def test_login_with_incorrect_info(self):
response = self.client.post(reverse('student_signin'),
{'username': 'wrong', 'password': '1'})
self.assertRedirects(response, reverse('student_index'))
def test_login_and_logout(self):
self.client.login(username='test', password='test')
response = self.client.get(reverse('student_signout'))
self.assertRedirects(response, reverse('student_index'))
@override_settings(AUTHENTICATION_BACKENDS=
('django.contrib.auth.backends.ModelBackend',))
class StudentProfileTest(TestCase):
def setUp(self):
self.student = User(username='test')
self.student.set_password('test')
self.student.save()
self.client = Client()
self.client.login(username='test', password='test')
def test_profile_exist(self):
self.assertTrue(self.student.profile)
self.assertEqual(self.student.profile.school_id, '')
self.assertEqual(self.student.profile.grade, '')
self.assertEqual(self.student.profile.class_num, '')
self.assertEqual(self.student.profile.phone_num, '')
self.assertEqual(self.student.profile.major, '')
def test_modified_profile(self):
response = self.client.post(reverse('update_student_profile'),
{'school_id': 'school_id',
'grade': 'grade',
'major': 'major',
'class_num': 'class_num',
'phone_num': ''})
self.assertEqual(response.content, '{"status_phrase": "ok"}')
self.assertEqual(self.student.profile.school_id, 'school_id')
self.assertEqual(self.student.profile.grade, 'grade')
self.assertEqual(self.student.profile.major, 'major')
self.assertEqual(self.student.profile.class_num, 'class_num')
self.assertEqual(self.student.profile.phone_num, '')
def test_modified_profile_illegally(self):
response = self.client.post(reverse('update_student_profile'),
{'school_id': 'school_id',
'grade': 'grade',
'major': 'major',<|fim▁hole|> self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, '{"status_phrase": "fail"}')
self.assertEqual(self.student.profile.school_id, '')
self.assertEqual(self.student.profile.grade, '')
self.assertEqual(self.student.profile.major, '')
self.assertEqual(self.student.profile.class_num, '')
self.assertEqual(self.student.profile.phone_num, '')<|fim▁end|> | 'class_num': '',
'phone_num': ''}) |
<|file_name|>pg.rs<|end_file_name|><|fim▁begin|>use discrete::{DiscreteDist32};
use env::{Env, DiscreteEnv, EnvRepr, EnvConvert, Action, DiscreteAction, Response, Value, Episode, EpisodeStep};
use operator::prelude::*;
use operator::rw::{ReadBuffer, ReadAccumulateBuffer, WriteBuffer, AccumulateBuffer};
use rng::xorshift::{Xorshiftplus128Rng};
use sharedmem::{RwSlice};
use rand::{Rng};
use std::cell::{RefCell};
use std::cmp::{min};
use std::marker::{PhantomData};
use std::rc::{Rc};
pub struct EpisodeStepSample<E> where E: Env {
//pub env: Rc<RefCell<E>>,
pub env: Rc<E>,
pub act_idx: Option<u32>,
pub suffix_r: Option<E::Response>,
pub baseline: Option<f32>,
weight: Option<f32>,
}
impl<E> EpisodeStepSample<E> where E: Env {
pub fn new(env: Rc<E>, act_idx: Option<u32>, suffix_r: Option<E::Response>) -> EpisodeStepSample<E> {
EpisodeStepSample{
env: env,
act_idx: act_idx,
suffix_r: suffix_r,
baseline: None,
weight: None,
}
}
pub fn set_baseline(&mut self, baseline: f32) {
self.baseline = Some(baseline);
}
pub fn init_weight(&mut self) {
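// REINFORCE-style sample weight: suffix return minus the (optional) baseline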
self.weight = Some(self.suffix_r.map(|r| r.as_scalar()).unwrap() - self.baseline.unwrap_or(0.0));
}
}
impl<E> SampleDatum<[f32]> for EpisodeStepSample<E> where E: Env + EnvRepr<f32> {
fn extract_input(&self, output: &mut [f32]) -> Result<(), ()> {
self.env.extract_observable(output);
Ok(())
}
}
impl<E> SampleLabel for EpisodeStepSample<E> where E: Env {
fn class(&self) -> Option<u32> {
self.act_idx
}
fn target(&self) -> Option<f32> {
self.suffix_r.map(|r| r.as_scalar())
}
}
impl<E> SampleLossWeight<ClassLoss> for EpisodeStepSample<E> where E: Env {
fn weight(&self) -> Option<f32> {
self.weight
}
fn mix_weight(&mut self, w: f32) -> Result<(), ()> {
self.weight = Some(self.weight.unwrap_or(1.0) * w);
Ok(())
}
}
pub struct BasePgWorker<E, PolicyOp> where E: Env {
batch_sz: usize,
minibatch_sz: usize,
max_horizon: usize,
act_dist: DiscreteDist32,
episodes: Vec<Episode<E>>,
episode_ks: Vec<usize>,
step_values: Vec<Vec<f32>>,
final_values: Vec<Option<f32>>,
_marker: PhantomData<(E, PolicyOp)>,
}
impl<E, PolicyOp> BasePgWorker<E, PolicyOp>
where E: Env + EnvRepr<f32> + Clone,
E::Action: DiscreteAction,
PolicyOp: DiffOperatorInput<f32, EpisodeStepSample<E>> + DiffOperatorOutput<f32, RwSlice<f32>>,
{
pub fn new(minibatch_sz: usize, max_horizon: usize) -> BasePgWorker<E, PolicyOp> {
let mut episodes = Vec::with_capacity(minibatch_sz);
for _ in 0 .. minibatch_sz {
episodes.push(Episode::new());
}
let mut episode_ks = Vec::with_capacity(minibatch_sz);
episode_ks.resize(minibatch_sz, 0);
let mut step_values = Vec::with_capacity(minibatch_sz);
for _ in 0 .. minibatch_sz {
step_values.push(vec![]);
}
let mut final_values = Vec::with_capacity(minibatch_sz);
final_values.resize(minibatch_sz, None);
BasePgWorker{
batch_sz: 1,
minibatch_sz: minibatch_sz,
max_horizon: max_horizon,
act_dist: DiscreteDist32::new(<E::Action as Action>::dim()),
episodes: episodes,
episode_ks: episode_ks,
step_values: step_values,
final_values: final_values,
_marker: PhantomData,
}
}
pub fn reset_episodes<R>(&mut self, init_cfg: &E::Init, rng: &mut R) where R: Rng {
for episode in self.episodes.iter_mut() {
episode.reset(init_cfg, rng);
}
}
pub fn batch_sample_steps<V, R>(&mut self, max_num_steps: Option<usize>, value_cfg: V::Cfg, policy: &mut PolicyOp, /*value: Option<&mut ValueOp>,*/ cache: &mut Vec<EpisodeStepSample<E>>, init_cfg: &E::Init, rng: &mut R) where V: Value<Res=E::Response>, R: Rng {
let action_dim = <E::Action as Action>::dim();
for episode in self.episodes.iter_mut() {
if episode.terminated() || episode.horizon() >= self.max_horizon {
episode.reset(init_cfg, rng);
}
}
for (idx, episode) in self.episodes.iter_mut().enumerate() {
let init_horizon = episode.horizon();
self.episode_ks[idx] = init_horizon;
}
let mut step_offset = 0;
loop {
let mut term_count = 0;
cache.clear();
for (idx, episode) in self.episodes.iter_mut().enumerate() {
if episode.terminated() {
// FIXME(20161017): `cache` still wants a sample.
term_count += 1;
continue;
}
let k = self.episode_ks[idx] + step_offset;
let prev_env = match k {
0 => episode.init_env.clone(),
k => episode.steps[k-1].next_env.clone(),
};
//let mut next_env: E = prev_env.borrow().clone();
let sample = EpisodeStepSample::new(prev_env, None, None);
cache.push(sample);
}
policy.load_data(&cache);
policy.forward(OpPhase::Learning);
for (idx, episode) in self.episodes.iter_mut().enumerate() {
let output = policy.get_output();
// FIXME(20161009): sometimes the policy output contains NaNs because
// all probabilities were zero, should gracefully handle this case.
let act_idx = match self.act_dist.reset(&output.borrow()[idx * action_dim .. (idx+1) * action_dim]) {
Ok(_) => self.act_dist.sample(rng).unwrap(),
Err(_) => rng.gen_range(0, action_dim),
};
let action = <E::Action as DiscreteAction>::from_idx(act_idx as u32);
let k = self.episode_ks[idx] + step_offset;
let prev_env = match k {
0 => episode.init_env.clone(),
k => episode.steps[k-1].next_env.clone(),
};
let mut next_env = (*prev_env).clone();
if let Ok(res) = next_env.step(&action) {
episode.steps.push(EpisodeStep{
action: action,
res: res,
//next_env: Rc::new(RefCell::new(next_env)),
next_env: Rc::new(next_env),
});
} else {<|fim▁hole|> }
}
step_offset += 1;
if term_count == self.episodes.len() {
break;
} else if let Some(max_num_steps) = max_num_steps {
if step_offset >= max_num_steps {
break;
}
}
}
for (idx, episode) in self.episodes.iter_mut().enumerate() {
if !episode.terminated() {
if false /*let Some(ref mut value_op) = value*/ {
let impute_val = 0.0; // FIXME: get from the value op.
/*value_op.load_data(&cache);
value_op.forward(OpPhase::Learning);
let impute_val = value_op.get_output().borrow()[0];*/
self.final_values[idx] = Some(impute_val);
} else {
self.final_values[idx] = None;
}
} else {
self.final_values[idx] = None;
}
let mut suffix_val = if let Some(final_val) = self.final_values[idx] {
Some(<V as Value>::from_scalar(final_val, value_cfg))
} else {
None
};
self.step_values[idx].resize(episode.horizon(), 0.0);
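// walk the episode backwards, folding each response into the running suffix value (return-to-go)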
for k in (self.episode_ks[idx] .. episode.horizon()).rev() {
if let Some(res) = episode.steps[k].res {
if let Some(ref mut suffix_val) = suffix_val {
suffix_val.lreduce(res);
} else {
suffix_val = Some(<V as Value>::from_res(res, value_cfg));
}
}
if let Some(suffix_val) = suffix_val {
self.step_values[idx][k] = suffix_val.to_scalar();
}
}
}
}
pub fn sample_steps<V, R>(&mut self, max_num_steps: Option<usize>, value_cfg: V::Cfg, policy: &mut PolicyOp, /*value: Option<&mut ValueOp>,*/ cache: &mut Vec<EpisodeStepSample<E>>, init_cfg: &E::Init, rng: &mut R) where V: Value<Res=E::Response>, R: Rng {
let action_dim = <E::Action as Action>::dim();
for episode in self.episodes.iter_mut() {
if episode.terminated() || episode.horizon() >= self.max_horizon {
episode.reset(init_cfg, rng);
}
}
for (idx, episode) in self.episodes.iter_mut().enumerate() {
let init_horizon = episode.horizon();
let horizon_limit = if let Some(max_num_steps) = max_num_steps {
min(init_horizon + max_num_steps, self.max_horizon)
} else {
self.max_horizon
};
for k in init_horizon .. horizon_limit {
if episode.terminated() {
break;
}
let prev_env = match k {
0 => episode.init_env.clone(),
k => episode.steps[k-1].next_env.clone(),
};
let mut next_env = (*prev_env).clone();
let sample = EpisodeStepSample::new(prev_env, None, None);
cache.clear();
cache.push(sample);
policy.load_data(&cache);
policy.forward(OpPhase::Learning);
let output = policy.get_output();
// FIXME(20161009): sometimes the policy output contains NaNs because
// all probabilities were zero, should gracefully handle this case.
self.act_dist.reset(&output.borrow()[ .. action_dim]);
let act_idx = self.act_dist.sample(rng).unwrap();
let action = <E::Action as DiscreteAction>::from_idx(act_idx as u32);
if let Ok(res) = next_env.step(&action) {
episode.steps.push(EpisodeStep{
action: action,
res: res,
//next_env: Rc::new(RefCell::new(next_env)),
next_env: Rc::new(next_env),
});
} else {
panic!();
}
}
if !episode.terminated() {
if false /*let Some(ref mut value_op) = value*/ {
let impute_val = 0.0; // FIXME: get from the value op.
/*value_op.load_data(&cache);
value_op.forward(OpPhase::Learning);
let impute_val = value_op.get_output().borrow()[0];*/
self.final_values[idx] = Some(impute_val);
} else {
self.final_values[idx] = None;
}
} else {
self.final_values[idx] = None;
}
let mut suffix_val = if let Some(final_val) = self.final_values[idx] {
Some(<V as Value>::from_scalar(final_val, value_cfg))
} else {
None
};
self.step_values[idx].resize(episode.horizon(), 0.0);
for k in (init_horizon .. episode.horizon()).rev() {
if let Some(res) = episode.steps[k].res {
if let Some(ref mut suffix_val) = suffix_val {
suffix_val.lreduce(res);
} else {
suffix_val = Some(<V as Value>::from_res(res, value_cfg));
}
}
if let Some(suffix_val) = suffix_val {
self.step_values[idx][k] = suffix_val.to_scalar();
}
}
}
}
pub fn sample<R>(&mut self, policy: &mut PolicyOp, /*value: Option<&mut ValueOp>,*/ cache: &mut Vec<EpisodeStepSample<E>>, episodes: &mut [Episode<E>], init_cfg: &E::Init, rng: &mut R) where R: Rng {
let action_dim = <E::Action as Action>::dim();
for episode in episodes {
episode.reset(init_cfg, rng);
for k in episode.steps.len() .. self.max_horizon {
if episode.terminated() {
break;
}
let prev_env = match k {
0 => episode.init_env.clone(),
k => episode.steps[k-1].next_env.clone(),
};
let mut next_env = (*prev_env).clone();
let sample = EpisodeStepSample::new(prev_env, None, None);
cache.clear();
cache.push(sample);
policy.load_data(&cache);
policy.forward(OpPhase::Learning);
let output = policy.get_output();
self.act_dist.reset(&output.borrow()[ .. action_dim]);
let act_idx = self.act_dist.sample(rng).unwrap();
let action = <E::Action as DiscreteAction>::from_idx(act_idx as u32);
if let Ok(res) = next_env.step(&action) {
episode.steps.push(EpisodeStep{
action: action,
res: res,
//next_env: Rc::new(RefCell::new(next_env)),
next_env: Rc::new(next_env),
});
} else {
panic!();
}
}
if !episode.terminated() {
// FIXME(20161008): bootstrap with the value of the last state.
}
episode._fill_suffixes();
}
}
}
#[derive(Clone, Copy, Debug)]
pub struct PolicyGradConfig {
pub batch_sz: usize,
pub minibatch_sz: usize,
pub step_size: f32,
pub max_horizon: usize,
pub baseline: f32,
}
pub struct PolicyGradWorker<E, Op>
where E: Env + EnvRepr<f32> + Clone, //EnvConvert<E>,
E::Action: DiscreteAction,
Op: DiffOperatorInput<f32, EpisodeStepSample<E>> + DiffOperatorOutput<f32, RwSlice<f32>>,
{
cfg: PolicyGradConfig,
operator: Op,
cache: Vec<EpisodeStepSample<E>>,
base_pg: BasePgWorker<E, Op>,
param: Vec<f32>,
grad_acc: Vec<f32>,
}
impl<E, Op> PolicyGradWorker<E, Op>
where E: Env + EnvRepr<f32> + Clone, //EnvConvert<E>,
E::Action: DiscreteAction,
Op: DiffOperator<f32> + DiffOperatorInput<f32, EpisodeStepSample<E>> + DiffOperatorOutput<f32, RwSlice<f32>>,
{
pub fn new(cfg: PolicyGradConfig, op: Op) -> PolicyGradWorker<E, Op> {
let grad_sz = op.diff_param_sz();
let mut param = Vec::with_capacity(grad_sz);
param.resize(grad_sz, 0.0);
let mut grad_acc = Vec::with_capacity(grad_sz);
grad_acc.resize(grad_sz, 0.0);
PolicyGradWorker{
cfg: cfg,
operator: op,
cache: Vec::with_capacity(cfg.batch_sz),
base_pg: BasePgWorker::new(cfg.minibatch_sz, cfg.max_horizon),
param: param,
grad_acc: grad_acc,
}
}
pub fn sample<R>(&mut self, episodes: &mut [Episode<E>], init_cfg: &E::Init, rng: &mut R) where R: Rng {
self.base_pg.sample(&mut self.operator, &mut self.cache, episodes, init_cfg, rng);
}
}
impl<E, Op> OptWorker<f32, Episode<E>> for PolicyGradWorker<E, Op>
where E: Env + EnvRepr<f32> + Clone, //EnvConvert<E>,
E::Action: DiscreteAction,
Op: DiffOperator<f32> + DiffOperatorInput<f32, EpisodeStepSample<E>> + DiffOperatorOutput<f32, RwSlice<f32>>,
{
type Rng = Op::Rng;
fn init_param(&mut self, rng: &mut Self::Rng) {
self.operator.init_param(rng);
self.operator.store_param(&mut self.param, 0);
//println!("DEBUG: param: {:?}", self.param);
}
/*fn load_local_param(&mut self, param_reader: &mut ReadBuffer<f32>) { unimplemented!(); }
fn store_local_param(&mut self, param_writer: &mut WriteBuffer<f32>) { unimplemented!(); }
fn store_global_param(&mut self, param_writer: &mut WriteBuffer<f32>) { unimplemented!(); }*/
fn step(&mut self, episodes: &mut Iterator<Item=Episode<E>>) {
self.operator.reset_loss();
self.operator.reset_grad();
self.cache.clear();
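    // Accumulate per-step samples over the minibatch of episodes; every time the
    // cache fills up to batch_sz samples, run one forward/backward pass and flush
    // it (any final partial batch is flushed after the loop).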
for episode in episodes.take(self.cfg.minibatch_sz) {
for k in 0 .. episode.steps.len() {
let mut sample = match k {
0 => EpisodeStepSample::new(
episode.init_env.clone(),
Some(episode.steps[0].action.idx()),
episode.suffixes[0]),
k => EpisodeStepSample::new(
episode.steps[k-1].next_env.clone(),
Some(episode.steps[k].action.idx()),
episode.suffixes[k]),
};
assert!(sample.suffix_r.is_some());
sample.set_baseline(self.cfg.baseline);
sample.init_weight();
sample.mix_weight(1.0 / self.cfg.minibatch_sz as f32);
self.cache.push(sample);
if self.cache.len() < self.cfg.batch_sz {
continue;
}
self.operator.load_data(&self.cache);
self.operator.forward(OpPhase::Learning);
self.operator.backward();
self.cache.clear();
}
}
if !self.cache.is_empty() {
self.operator.load_data(&self.cache);
self.operator.forward(OpPhase::Learning);
self.operator.backward();
self.cache.clear();
}
self.operator.accumulate_grad(-self.cfg.step_size, 0.0, &mut self.grad_acc, 0);
self.operator.update_param(1.0, 1.0, &mut self.grad_acc, 0);
self.operator.store_param(&mut self.param, 0);
//println!("DEBUG: param: {:?}", self.param);
}
fn eval(&mut self, epoch_size: usize, samples: &mut Iterator<Item=Episode<E>>) {
}
}
/*impl<E, Op> OptStats<()> for PolicyGradWorker<E, Op>
where E: Env + EnvRepr<f32> + EnvConvert<E>,
E::Action: DiscreteAction,
Op: DiffOperatorIo<f32, EpisodeStepSample<E>, RwSlice<f32>>,
{
fn reset_opt_stats(&mut self) {
unimplemented!();
}
fn get_opt_stats(&self) -> &() {
unimplemented!();
}
}*/<|fim▁end|> | panic!(); |
<|file_name|>securityhub.rs<|end_file_name|><|fim▁begin|>#![cfg(feature = "securityhub")]
extern crate env_logger;
extern crate rusoto_core;
extern crate rusoto_securityhub;
use rusoto_core::Region;
use rusoto_securityhub::{ListInvitationsRequest, SecurityHub, SecurityHubClient};
<|fim▁hole|> let _ = env_logger::try_init();
let client = SecurityHubClient::new(Region::UsWest2);
let request = ListInvitationsRequest {
..Default::default()
};
client.list_invitations(request).await.unwrap();
}<|fim▁end|> | #[tokio::test]
#[ignore]
async fn should_list_invitations() { |
<|file_name|>quickstart.js<|end_file_name|><|fim▁begin|>// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
const {OAuth2Client} = require('google-auth-library');
const {grpc} = require('google-gax');
const http = require('http');
const url = require('url');
const open = require('open');
const destroyer = require('server-destroy');
// [START nodejs_channel_quickstart]
// Reads the secrets from an `oauth2.keys.json` file, which should be downloaded
// from the Google Developers Console and saved in the same directory as the
// sample app.
// This sample app only calls read-only methods from the Channel API. Include
// additional scopes if calling methods that modify the configuration.
const SCOPES = ['https://www.googleapis.com/auth/apps.order'];
async function listCustomers(authClient, accountId) {
// Imports the Google Cloud client library
const {CloudChannelServiceClient} = require('@google-cloud/channel');
// Instantiates a client using OAuth2 credentials.
const sslCreds = grpc.credentials.createSsl();
const credentials = grpc.credentials.combineChannelCredentials(
sslCreds,
grpc.credentials.createFromGoogleCredential(authClient)
);
<|fim▁hole|> // Instantiates a client
const client = new CloudChannelServiceClient({
sslCreds: credentials,
});
// Calls listCustomers() method
const customers = await client.listCustomers({
parent: `accounts/${accountId}`,
});
console.info(customers);
}
/**
 * Create a new OAuth2Client, and go through the OAuth2 consent
 * workflow. Return the full client to the callback.
*/
function getAuthenticatedClient(keys) {
return new Promise((resolve, reject) => {
// Create an oAuth client to authorize the API call. Secrets are kept in a
// `keys.json` file, which should be downloaded from the Google Developers
// Console.
const oAuth2Client = new OAuth2Client(
keys.web.client_id,
keys.web.client_secret,
// The first redirect URL from the `oauth2.keys.json` file will be used
// to generate the OAuth2 callback URL. Update the line below or edit
// the redirect URL in the Google Developers Console if needed.
// This sample app expects the callback URL to be
// 'http://localhost:3000/oauth2callback'
keys.web.redirect_uris[0]
);
// Generate the url that will be used for the consent dialog.
const authorizeUrl = oAuth2Client.generateAuthUrl({
access_type: 'offline',
scope: SCOPES.join(' '),
});
// Open an http server to accept the oauth callback. In this example, the
// only request to our webserver is to /oauth2callback?code=<code>
const server = http
.createServer(async (req, res) => {
try {
if (req.url.indexOf('/oauth2callback') > -1) {
// Acquire the code from the querystring, and close the web
// server.
const qs = new url.URL(req.url, 'http://localhost:3000')
.searchParams;
const code = qs.get('code');
console.log(`Code is ${code}`);
res.end('Authentication successful! Please return to the console.');
server.destroy();
// Now that we have the code, use that to acquire tokens.
const r = await oAuth2Client.getToken(code);
// Make sure to set the credentials on the OAuth2 client.
oAuth2Client.setCredentials(r.tokens);
console.info('Tokens acquired.');
resolve(oAuth2Client);
}
} catch (e) {
reject(e);
}
})
.listen(3000, () => {
// Open the browser to the authorize url to start the workflow.
// This line will not work if you are running the code in the
// environment where a browser is not available. In this case,
// copy the URL and open it manually in a browser.
console.info(`Opening the browser with URL: ${authorizeUrl}`);
open(authorizeUrl, {wait: false}).then(cp => cp.unref());
});
destroyer(server);
});
}
async function main(accountId, keys) {
// TODO: uncomment with your account id
// const accountId = 'C012345'
// TODO: uncomment this line with your oAuth2 file
//const keys = require('./oauth2.keys.json');
getAuthenticatedClient(keys).then(authClient =>
listCustomers(authClient, accountId)
);
}
// [END nodejs_channel_quickstart]
main(...process.argv.slice(2)).catch(err => {
console.error(err.message);
process.exitCode = 1;
});
process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});<|fim▁end|> | |
<|file_name|>few-ones.rs<|end_file_name|><|fim▁begin|>use test_float_parse::validate;
fn main() {
let mut pow = vec![];
for i in 0..63 {
pow.push(1u64 << i);
}
for a in &pow {
for b in &pow {
for c in &pow {<|fim▁hole|> }
}<|fim▁end|> | validate(&(a | b | c).to_string());
}
} |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>use formats::{match_name, Format};
use once_cell::sync::Lazy;
use parser::{
eyre::{bail, Result},
graph_triples::{Resource, ResourceInfo},
ParserTrait,
};
use std::collections::BTreeMap;
use std::sync::Arc;
// Re-exports
pub use parser::Parser;
// The following high level functions hide the implementation
// detail of having a static list of parsers. They are intended as the
// only public interface for this crate.
pub fn parse(resource: Resource, code: &str) -> Result<ResourceInfo> {
PARSERS.parse(resource, code)
}
/// The set of registered parsers in the current process
static PARSERS: Lazy<Arc<Parsers>> = Lazy::new(|| Arc::new(Parsers::new()));
/// A set of registered parsers, either built-in or provided by plugins
struct Parsers {
inner: BTreeMap<String, Parser>,
}
/// A macro to dispatch methods to builtin parsers
///
/// This avoids having to do a search over the parsers' specs for matching `languages`.
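///
/// For example, `dispatch_builtins!(format, parse, resource, &path, code)` expands to a
/// `match` on `format` that calls `parse` on whichever parser crate was compiled in for
/// that language, and yields `None` when no parser matches.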
macro_rules! dispatch_builtins {<|fim▁hole|> match $format {
#[cfg(feature = "parser-bash")]
Format::Bash | Format::Shell | Format::Zsh => Some(parser_bash::BashParser::$method($($arg),*)),
#[cfg(feature = "parser-calc")]
Format::Calc => Some(parser_calc::CalcParser::$method($($arg),*)),
#[cfg(feature = "parser-js")]
Format::JavaScript => Some(parser_js::JsParser::$method($($arg),*)),
#[cfg(feature = "parser-py")]
Format::Python => Some(parser_py::PyParser::$method($($arg),*)),
#[cfg(feature = "parser-r")]
Format::R => Some(parser_r::RParser::$method($($arg),*)),
#[cfg(feature = "parser-rust")]
Format::Rust => Some(parser_rust::RustParser::$method($($arg),*)),
#[cfg(feature = "parser-ts")]
Format::TypeScript => Some(parser_ts::TsParser::$method($($arg),*)),
// Fallback to empty result
_ => Option::<Result<ResourceInfo>>::None
}
};
}
impl Parsers {
/// Create a set of parsers
///
    /// Note that these strings are labels for the parsers: they
    /// aim to be consistent with the parser name, are convenient
    /// to use with `stencila parsers show`, and don't need to be
    /// consistent with format names or aliases.
pub fn new() -> Self {
let inner = vec![
#[cfg(feature = "parser-bash")]
("bash", parser_bash::BashParser::spec()),
#[cfg(feature = "parser-calc")]
("calc", parser_calc::CalcParser::spec()),
#[cfg(feature = "parser-js")]
("js", parser_js::JsParser::spec()),
#[cfg(feature = "parser-py")]
("py", parser_py::PyParser::spec()),
#[cfg(feature = "parser-r")]
("r", parser_r::RParser::spec()),
#[cfg(feature = "parser-rust")]
("rust", parser_rust::RustParser::spec()),
#[cfg(feature = "parser-ts")]
("ts", parser_ts::TsParser::spec()),
]
.into_iter()
.map(|(label, parser): (&str, Parser)| (label.to_string(), parser))
.collect();
Self { inner }
}
/// List the available parsers
fn list(&self) -> Vec<String> {
self.inner
.keys()
.into_iter()
.cloned()
.collect::<Vec<String>>()
}
/// Get a parser by label
fn get(&self, label: &str) -> Result<Parser> {
match self.inner.get(label) {
Some(parser) => Ok(parser.clone()),
None => bail!("No parser with label `{}`", label),
}
}
/// Parse a code resource
fn parse(&self, resource: Resource, code: &str) -> Result<ResourceInfo> {
let (path, language) = if let Resource::Code(code) = &resource {
(code.path.clone(), code.language.clone().unwrap_or_default())
} else {
bail!("Attempting to parse a resource that is not a `Code` resource")
};
let format = match_name(&language);
let resource_info =
if let Some(result) = dispatch_builtins!(format, parse, resource, &path, code) {
result?
} else {
bail!(
"Unable to parse code in language `{}`: no matching parser found",
language
)
};
Ok(resource_info)
}
}
impl Default for Parsers {
fn default() -> Self {
Self::new()
}
}
#[cfg(feature = "cli")]
pub mod commands {
use std::{fs, path::PathBuf};
use super::*;
use cli_utils::{async_trait::async_trait, result, Result, Run};
use parser::graph_triples::resources;
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
#[structopt(
about = "Manage and use parsers",
setting = structopt::clap::AppSettings::ColoredHelp,
setting = structopt::clap::AppSettings::VersionlessSubcommands
)]
pub struct Command {
#[structopt(subcommand)]
pub action: Action,
}
#[derive(Debug, StructOpt)]
#[structopt(
setting = structopt::clap::AppSettings::DeriveDisplayOrder
)]
pub enum Action {
List(List),
Show(Show),
Parse(Parse),
}
#[async_trait]
impl Run for Command {
async fn run(&self) -> Result {
let Self { action } = self;
match action {
Action::List(action) => action.run().await,
Action::Show(action) => action.run().await,
Action::Parse(action) => action.run().await,
}
}
}
/// List the parsers that are available
///
/// The list of available parsers includes those that are built into the Stencila
/// binary as well as any parsers provided by plugins.
#[derive(Debug, StructOpt)]
#[structopt(
setting = structopt::clap::AppSettings::ColoredHelp
)]
pub struct List {}
#[async_trait]
impl Run for List {
async fn run(&self) -> Result {
let list = PARSERS.list();
result::value(list)
}
}
/// Show the specifications of a parser
#[derive(Debug, StructOpt)]
#[structopt(
setting = structopt::clap::AppSettings::ColoredHelp
)]
pub struct Show {
/// The label of the parser
///
/// To get the list of parser labels use `stencila parsers list`.
label: String,
}
#[async_trait]
impl Run for Show {
async fn run(&self) -> Result {
let parser = PARSERS.get(&self.label)?;
result::value(parser)
}
}
/// Parse some code using a parser
///
/// The code is parsed into a set of graph `Relation`/`Resource` pairs using the
/// parser that matches the filename extension (or specified using `--lang`).
/// Useful for testing Stencila's static code analysis for a particular language.
#[derive(Debug, StructOpt)]
#[structopt(
setting = structopt::clap::AppSettings::ColoredHelp
)]
pub struct Parse {
/// The file (or code) to parse
#[structopt(multiple = true)]
code: Vec<String>,
/// If the argument should be treated as text, rather than a file path
#[structopt(short, long)]
text: bool,
/// The language of the code
#[structopt(short, long)]
lang: Option<String>,
}
#[async_trait]
impl Run for Parse {
async fn run(&self) -> Result {
let (path, code, lang) = if self.text || self.code.len() > 1 {
let code = self.code.join(" ");
(
"<text>".to_string(),
code,
self.lang.clone().unwrap_or_default(),
)
} else {
let file = self.code[0].clone();
let code = fs::read_to_string(&file)?;
let ext = PathBuf::from(&file)
.extension()
.map(|ext| ext.to_string_lossy().to_string())
.unwrap_or_default();
let lang = self.lang.clone().or(Some(ext)).unwrap_or_default();
(file, code, lang)
};
let path = PathBuf::from(path);
let resource = resources::code(&path, "<id>", "<cli>", Some(lang));
let resource_info = PARSERS.parse(resource, &code)?;
result::value(resource_info)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use parser::graph_triples::{relations, resources};
use std::path::PathBuf;
use test_utils::assert_json_eq;
#[test]
#[cfg(feature = "parser-calc")]
fn test_parse() -> Result<()> {
let path = PathBuf::from("<test>");
let resource = resources::code(&path, "<id>", "<cli>", Some("Calc".to_string()));
let resource_info = parse(resource, "a = 1\nb = a * a")?;
assert!(matches!(resource_info.execute_pure, None));
assert!(!resource_info.is_pure());
assert_json_eq!(
resource_info.relations,
vec![
(
relations::assigns((0, 0, 0, 1)),
resources::symbol(&path, "a", "Number")
),
(
relations::assigns((1, 0, 1, 1)),
resources::symbol(&path, "b", "Number")
),
(
relations::uses((1, 4, 1, 5)),
resources::symbol(&path, "a", "Number")
),
(
relations::uses((1, 8, 1, 9)),
resources::symbol(&path, "a", "Number")
)
]
);
Ok(())
}
}<|fim▁end|> | ($format:expr, $method:ident $(,$arg:expr)*) => { |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import { Component } from 'vidom';
import { DocComponent, DocTabs, DocTab, DocAttrs, DocAttr, DocExample, DocChildren, DocText, DocInlineCode } from '../../Doc';
import SimpleExample from './examples/SimpleExample';
import simpleExampleCode from '!raw!./examples/SimpleExample.js';
export default function ModalDoc({ tab, onTabChange }) {
return (
<DocComponent title="Modal">
<DocTabs value={ tab } onTabChange={ onTabChange }>
<DocTab title="Examples" value="Examples">
<DocExample title="Simple" code={ simpleExampleCode }>
<SimpleExample/>
</DocExample>
</DocTab>
<DocTab title="API" value="api">
<DocAttrs>
<DocAttr name="autoclosable" type="Boolean" def="false">
Enables the modal to be hidden on pressing "Esc" or clicking somewhere outside its content.
</DocAttr>
<DocAttr name="onHide" type="Function">
The callback to handle hide event.
</DocAttr>
<DocAttr name="theme" type="String" required>
Sets the modal theme.
</DocAttr>
<DocAttr name="visible" type="Boolean" def="false">
Sets the visibility of the modal.
</DocAttr>
</DocAttrs>
<DocChildren>
<DocText>
Children with any valid type are allowed.
</DocText>
</DocChildren>
</DocTab>
</DocTabs><|fim▁hole|> </DocComponent>
);
}<|fim▁end|> | |
<|file_name|>flag.rs<|end_file_name|><|fim▁begin|>// Std
use std::convert::From;
use std::ffi::{OsStr, OsString};
use std::fmt::{Display, Formatter, Result};
use std::mem;
use std::rc::Rc;
use std::result::Result as StdResult;
// Internal
use args::{AnyArg, ArgSettings, Base, DispOrder, Switched};
use map::{self, VecMap};
use Arg;
#[derive(Default, Clone, Debug)]
#[doc(hidden)]
pub struct FlagBuilder<'n, 'e>
where
'n: 'e,
{
pub b: Base<'n, 'e>,
pub s: Switched<'e>,
}<|fim▁hole|> FlagBuilder {
b: Base::new(name),
..Default::default()
}
}
}
impl<'a, 'b, 'z> From<&'z Arg<'a, 'b>> for FlagBuilder<'a, 'b> {
fn from(a: &'z Arg<'a, 'b>) -> Self {
FlagBuilder {
b: Base::from(a),
s: Switched::from(a),
}
}
}
impl<'a, 'b> From<Arg<'a, 'b>> for FlagBuilder<'a, 'b> {
fn from(mut a: Arg<'a, 'b>) -> Self {
FlagBuilder {
b: mem::replace(&mut a.b, Base::default()),
s: mem::replace(&mut a.s, Switched::default()),
}
}
}
impl<'n, 'e> Display for FlagBuilder<'n, 'e> {
fn fmt(&self, f: &mut Formatter) -> Result {
if let Some(l) = self.s.long {
write!(f, "--{}", l)?;
} else {
write!(f, "-{}", self.s.short.unwrap())?;
}
Ok(())
}
}
impl<'n, 'e> AnyArg<'n, 'e> for FlagBuilder<'n, 'e> {
fn name(&self) -> &'n str {
self.b.name
}
fn overrides(&self) -> Option<&[&'e str]> {
self.b.overrides.as_ref().map(|o| &o[..])
}
fn requires(&self) -> Option<&[(Option<&'e str>, &'n str)]> {
self.b.requires.as_ref().map(|o| &o[..])
}
fn blacklist(&self) -> Option<&[&'e str]> {
self.b.blacklist.as_ref().map(|o| &o[..])
}
fn required_unless(&self) -> Option<&[&'e str]> {
self.b.r_unless.as_ref().map(|o| &o[..])
}
fn is_set(&self, s: ArgSettings) -> bool {
self.b.settings.is_set(s)
}
fn has_switch(&self) -> bool {
true
}
fn takes_value(&self) -> bool {
false
}
fn set(&mut self, s: ArgSettings) {
self.b.settings.set(s)
}
fn max_vals(&self) -> Option<u64> {
None
}
fn val_names(&self) -> Option<&VecMap<&'e str>> {
None
}
fn num_vals(&self) -> Option<u64> {
None
}
fn possible_vals(&self) -> Option<&[&'e str]> {
None
}
fn validator(&self) -> Option<&Rc<Fn(String) -> StdResult<(), String>>> {
None
}
fn validator_os(&self) -> Option<&Rc<Fn(&OsStr) -> StdResult<(), OsString>>> {
None
}
fn min_vals(&self) -> Option<u64> {
None
}
fn short(&self) -> Option<char> {
self.s.short
}
fn long(&self) -> Option<&'e str> {
self.s.long
}
fn val_delim(&self) -> Option<char> {
None
}
fn help(&self) -> Option<&'e str> {
self.b.help
}
fn long_help(&self) -> Option<&'e str> {
self.b.long_help
}
fn val_terminator(&self) -> Option<&'e str> {
None
}
fn default_val(&self) -> Option<&'e OsStr> {
None
}
fn default_vals_ifs(&self) -> Option<map::Values<(&'n str, Option<&'e OsStr>, &'e OsStr)>> {
None
}
fn env<'s>(&'s self) -> Option<(&'n OsStr, Option<&'s OsString>)> {
None
}
fn longest_filter(&self) -> bool {
self.s.long.is_some()
}
fn aliases(&self) -> Option<Vec<&'e str>> {
if let Some(ref aliases) = self.s.aliases {
let vis_aliases: Vec<_> = aliases
.iter()
.filter_map(|&(n, v)| if v { Some(n) } else { None })
.collect();
if vis_aliases.is_empty() {
None
} else {
Some(vis_aliases)
}
} else {
None
}
}
}
impl<'n, 'e> DispOrder for FlagBuilder<'n, 'e> {
fn disp_ord(&self) -> usize {
self.s.disp_ord
}
}
impl<'n, 'e> PartialEq for FlagBuilder<'n, 'e> {
fn eq(&self, other: &FlagBuilder<'n, 'e>) -> bool {
self.b == other.b
}
}
#[cfg(test)]
mod test {
use super::FlagBuilder;
use args::settings::ArgSettings;
#[test]
fn flagbuilder_display() {
let mut f = FlagBuilder::new("flg");
f.b.settings.set(ArgSettings::Multiple);
f.s.long = Some("flag");
assert_eq!(&*format!("{}", f), "--flag");
let mut f2 = FlagBuilder::new("flg");
f2.s.short = Some('f');
assert_eq!(&*format!("{}", f2), "-f");
}
#[test]
fn flagbuilder_display_single_alias() {
let mut f = FlagBuilder::new("flg");
f.s.long = Some("flag");
f.s.aliases = Some(vec![("als", true)]);
assert_eq!(&*format!("{}", f), "--flag");
}
#[test]
fn flagbuilder_display_multiple_aliases() {
let mut f = FlagBuilder::new("flg");
f.s.short = Some('f');
f.s.aliases = Some(vec![
("alias_not_visible", false),
("f2", true),
("f3", true),
("f4", true),
]);
assert_eq!(&*format!("{}", f), "-f");
}
}<|fim▁end|> |
impl<'n, 'e> FlagBuilder<'n, 'e> {
pub fn new(name: &'n str) -> Self { |
<|file_name|>scheme.rs<|end_file_name|><|fim▁begin|>use alloc::boxed::Box;
use collections::string::{String, ToString};
use collections::vec::Vec;
use collections::vec_deque::VecDeque;
use core::ops::DerefMut;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use core::usize;
use common::elf::Elf;
use common::event::Event;
use common::get_slice::GetSlice;
use common::memory;
use scheduler::context::{context_switch, Context, ContextMemory};
use schemes::{Result, KScheme, Resource, ResourceSeek, Url};
use sync::Intex;
use syscall::SysError;
use syscall::handle::*;
#[derive(Copy, Clone, Debug)]
pub enum Msg {
Start,
Stop,
Open(*const u8, usize),
Close(usize),
Dup(usize),
Path(usize, *mut u8, usize),
Read(usize, *mut u8, usize),
Write(usize, *const u8, usize),
Seek(usize, isize, isize),
Sync(usize),
Truncate(usize, usize),
Event(*const Event),
}
pub struct Response {
msg: Msg,
result: AtomicUsize,
ready: AtomicBool,
}
impl Response {
pub fn new(msg: Msg) -> Box<Response> {
box Response {
msg: msg,
result: AtomicUsize::new(0),
ready: AtomicBool::new(false),
}
}
pub fn set(&mut self, result: usize) {
self.result.store(result, Ordering::SeqCst);
self.ready.store(true, Ordering::SeqCst);
}
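    /// Busy-wait until the scheme thread marks this response as ready, yielding to
    /// other contexts between polls, then return the stored result.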
pub fn get(&self) -> usize {
while !self.ready.load(Ordering::SeqCst) {
unsafe { context_switch(false) };
}
        self.result.load(Ordering::SeqCst)
}
}
impl Drop for Response {
fn drop(&mut self) {
while !self.ready.load(Ordering::SeqCst) {
unsafe { context_switch(false) };
}
}
}
/// A scheme resource
pub struct SchemeResource {
/// Pointer to parent
pub parent: *mut SchemeItem,
/// File handle
pub handle: usize,
}
impl SchemeResource {
pub fn send(&self, msg: Msg) -> Result<usize> {
unsafe { (*self.parent).send(msg) }
}
}
impl Resource for SchemeResource {
/// Duplicate the resource
fn dup(&self) -> Result<Box<Resource>> {
match self.send(Msg::Dup(self.handle)) {
Ok(fd) => Ok(box SchemeResource {
parent: self.parent,
handle: fd,
}),
Err(err) => Err(err)
}
}
/// Return the url of this resource
fn url(&self) -> Url {
let mut buf: [u8; 4096] = [0; 4096];
match self.send(Msg::Path(self.handle, buf.as_mut_ptr(), buf.len())) {
Ok(result) => Url::from_string(unsafe {
String::from_utf8_unchecked(Vec::from(buf.get_slice(None, Some(result))))
}),
            Err(_) => Url::new()
}
}
/// Read data to buffer
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut ptr = buf.as_mut_ptr();
let contexts = ::env().contexts.lock();
if let Some(current) = contexts.current() {
if let Some(translated) = unsafe { current.translate(ptr as usize) } {
ptr = translated as *mut u8;
}
}
self.send(Msg::Read(self.handle, ptr, buf.len()))
}
/// Write to resource
fn write(&mut self, buf: &[u8]) -> Result<usize> {
let mut ptr = buf.as_ptr();
let contexts = ::env().contexts.lock();
if let Some(current) = contexts.current() {
if let Some(translated) = unsafe { current.translate(ptr as usize) } {
ptr = translated as *const u8;
}
}
self.send(Msg::Write(self.handle, ptr, buf.len()))
}
/// Seek
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
let offset;
let whence;
match pos {
ResourceSeek::Start(off) => {
whence = 0;
offset = off as isize;
}
ResourceSeek::Current(off) => {
whence = 1;
offset = off;
}
ResourceSeek::End(off) => {
whence = 2;
offset = off;
}
}
self.send(Msg::Seek(self.handle, offset, whence))
}
/// Sync the resource
fn sync(&mut self) -> Result<()> {
match self.send(Msg::Sync(self.handle)) {
Ok(_) => Ok(()),
Err(err) => Err(err)
}
}
fn truncate(&mut self, len: usize) -> Result<()> {
match self.send(Msg::Truncate(self.handle, len)) {
Ok(_) => Ok(()),
Err(err) => Err(err)
}
}
}
impl Drop for SchemeResource {
fn drop(&mut self) {
        let _ = self.send(Msg::Close(self.handle));
}
}
/// A scheme item
pub struct SchemeItem {
/// The URL
url: Url,
/// The scheme
scheme: String,
/// The binary for the scheme
binary: Url,
/// Messages to be responded to
responses: Intex<VecDeque<*mut Response>>,
/// The handle
handle: usize,
_start: usize,
_stop: usize,
_open: usize,
_dup: usize,
_fpath: usize,
_read: usize,
_write: usize,<|fim▁hole|> _ftruncate: usize,
_close: usize,
_event: usize,
}
impl SchemeItem {
/// Load scheme item from URL
pub fn from_url(url: &Url) -> Box<SchemeItem> {
let mut scheme_item = box SchemeItem {
url: url.clone(),
scheme: String::new(),
binary: Url::from_string(url.to_string() + "main.bin"),
responses: Intex::new(VecDeque::new()),
handle: 0,
_start: 0,
_stop: 0,
_open: 0,
_dup: 0,
_fpath: 0,
_read: 0,
_write: 0,
_lseek: 0,
_fsync: 0,
_ftruncate: 0,
_close: 0,
_event: 0,
};
for part in url.reference().rsplit('/') {
if !part.is_empty() {
scheme_item.scheme = part.to_string();
break;
}
}
let mut memory = Vec::new();
if let Ok(mut resource) = scheme_item.binary.open() {
let mut vec: Vec<u8> = Vec::new();
resource.read_to_end(&mut vec);
unsafe {
let executable = Elf::from_data(vec.as_ptr() as usize);
scheme_item._start = executable.symbol("_start");
scheme_item._stop = executable.symbol("_stop");
scheme_item._open = executable.symbol("_open");
scheme_item._dup = executable.symbol("_dup");
scheme_item._fpath = executable.symbol("_fpath");
scheme_item._read = executable.symbol("_read");
scheme_item._write = executable.symbol("_write");
scheme_item._lseek = executable.symbol("_lseek");
scheme_item._fsync = executable.symbol("_fsync");
scheme_item._ftruncate = executable.symbol("_ftruncate");
scheme_item._close = executable.symbol("_close");
scheme_item._event = executable.symbol("_event");
for segment in executable.load_segment().iter() {
let virtual_address = segment.vaddr as usize;
let virtual_size = segment.mem_len as usize;
//TODO: Warning: Investigate this hack!
let hack = virtual_address % 4096;
let physical_address = memory::alloc(virtual_size + hack);
if physical_address > 0 {
// Copy progbits
::memcpy((physical_address + hack) as *mut u8,
(executable.data + segment.off as usize) as *const u8,
segment.file_len as usize);
// Zero bss
if segment.mem_len > segment.file_len {
::memset((physical_address + hack + segment.file_len as usize) as *mut u8,
0,
segment.mem_len as usize - segment.file_len as usize);
}
memory.push(ContextMemory {
physical_address: physical_address,
virtual_address: virtual_address - hack,
virtual_size: virtual_size + hack,
writeable: segment.flags & 2 == 2
});
}
}
}
}
let wd = url.to_string();
let scheme_item_ptr: *mut SchemeItem = scheme_item.deref_mut();
Context::spawn(scheme_item.binary.to_string(),
box move || {
unsafe {
{
let wd_c = wd + "\0";
do_sys_chdir(wd_c.as_ptr());
let stdio_c = "debug:\0";
do_sys_open(stdio_c.as_ptr(), 0);
do_sys_open(stdio_c.as_ptr(), 0);
do_sys_open(stdio_c.as_ptr(), 0);
let mut contexts = ::env().contexts.lock();
if let Some(mut current) = contexts.current_mut() {
current.unmap();
(*current.memory.get()) = memory;
current.map();
}
}
(*scheme_item_ptr).run();
}
});
scheme_item.handle = match scheme_item.send(Msg::Start) {
Ok(handle) => handle,
Err(_) => 0
};
scheme_item
}
}
impl KScheme for SchemeItem {
fn scheme(&self) -> &str {
        &self.scheme
}
// TODO: Hack for orbital
fn event(&mut self, event: &Event) {
        let _ = self.send(Msg::Event(event));
}
fn open(&mut self, url: &Url, flags: usize) -> Result<Box<Resource>> {
let c_str = url.to_string() + "\0";
match self.send(Msg::Open(c_str.as_ptr(), flags)) {
Ok(fd) => Ok(box SchemeResource {
parent: self,
handle: fd,
}),
Err(err) => Err(err)
}
}
}
impl Drop for SchemeItem {
fn drop(&mut self) {
        let _ = self.send(Msg::Stop);
}
}
impl SchemeItem {
pub fn send(&mut self, msg: Msg) -> Result<usize> {
let mut response = Response::new(msg);
self.responses.lock().push_back(response.deref_mut());
SysError::demux(response.get())
}
// TODO: More advanced check
pub fn valid(&self, call: usize) -> bool {
call > 0
}
pub fn run(&mut self) {
let mut running = true;
while running {
let response_option = self.responses.lock().pop_front();
if let Some(response_ptr) = response_option {
let ret = match unsafe { (*response_ptr).msg } {
Msg::Start => if self.valid(self._start) {
let fn_ptr: *const usize = &self._start;
unsafe { (*(fn_ptr as *const extern "C" fn() -> usize))() }
} else {
0
},
Msg::Stop => if self.valid(self._stop) {
running = false;
let fn_ptr: *const usize = &self._stop;
unsafe { (*(fn_ptr as *const extern "C" fn(usize) -> usize))(self.handle) }
} else {
usize::MAX
},
Msg::Open(path, flags) => if self.valid(self._open) {
let fn_ptr: *const usize = &self._open;
unsafe { (*(fn_ptr as *const extern "C" fn(usize, *const u8, usize) -> usize))(self.handle, path, flags) }
} else {
usize::MAX
},
Msg::Event(event_ptr) => if self.valid(self._event) {
let fn_ptr: *const usize = &self._event;
unsafe { (*(fn_ptr as *const extern "C" fn(usize, usize) -> usize))(self.handle, event_ptr as usize) }
} else {
usize::MAX
},
Msg::Dup(fd) => if self.valid(self._dup) {
let fn_ptr: *const usize = &self._dup;
unsafe { (*(fn_ptr as *const extern "C" fn(usize) -> usize))(fd) }
} else {
usize::MAX
},
Msg::Path(fd, ptr, len) => if self.valid(self._fpath) {
let fn_ptr: *const usize = &self._fpath;
unsafe {
(*(fn_ptr as *const extern "C" fn(usize, *mut u8, usize) -> usize))(fd,
ptr,
len)
}
} else {
usize::MAX
},
Msg::Read(fd, ptr, len) => if self.valid(self._read) {
let fn_ptr: *const usize = &self._read;
unsafe {
(*(fn_ptr as *const extern "C" fn(usize, *mut u8, usize) -> usize))(fd,
ptr,
len)
}
} else {
usize::MAX
},
Msg::Write(fd, ptr, len) =>
if self.valid(self._write) {
let fn_ptr: *const usize = &self._write;
unsafe { (*(fn_ptr as *const extern "C" fn(usize, *const u8, usize) -> usize))(fd, ptr, len) }
} else {
usize::MAX
},
Msg::Seek(fd, offset, whence) =>
if self.valid(self._lseek) {
let fn_ptr: *const usize = &self._lseek;
unsafe { (*(fn_ptr as *const extern "C" fn(usize, isize, isize) -> usize))(fd, offset, whence) }
} else {
usize::MAX
},
Msg::Sync(fd) => if self.valid(self._fsync) {
let fn_ptr: *const usize = &self._fsync;
unsafe { (*(fn_ptr as *const extern "C" fn(usize) -> usize))(fd) }
} else {
usize::MAX
},
Msg::Truncate(fd, len) => if self.valid(self._ftruncate) {
let fn_ptr: *const usize = &self._ftruncate;
unsafe { (*(fn_ptr as *const extern "C" fn(usize, usize) -> usize))(fd, len) }
} else {
usize::MAX
},
Msg::Close(fd) => if self.valid(self._close) {
let fn_ptr: *const usize = &self._close;
unsafe { (*(fn_ptr as *const extern "C" fn(usize) -> usize))(fd) }
} else {
usize::MAX
},
};
unsafe { (*response_ptr).set(ret); }
} else {
unsafe { context_switch(false); }
}
}
}
}<|fim▁end|> | _lseek: usize,
_fsync: usize, |
<|file_name|>table.ts<|end_file_name|><|fim▁begin|>/// <reference path="../vendor/underscore.d.ts" />
/// <reference path="./promises/promises.ts" />
/// <reference path="./common.ts" />
module IndexedStorage {
export module Table {
<|fim▁hole|> export interface Info {
ix: string[][];
ux: string[][];
key:string;
}
export class Structure {
static factory( name:string, uniques:any[] = [], indexes:any[] = [], key:string = '' ):Structure {
var structure:Structure = new Structure();
structure.name = name;
structure.uniques = uniques;
structure.indexes = indexes;
structure.key = key;
return structure;
}
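			// e.g. (illustrative names): Structure.factory( 'users', [['email']], [['createdAt']], 'id' )
			// describes a store keyed on 'id' with a unique index on 'email' and an index on 'createdAt'.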
// string:keyPath, '':autoIncrement, false/null:onsave
public key:string = '';
public indexes:any[] = [];
public uniques:any[] = [];
public name:string = '';
private changeSets:Changes = null;
private structure:Info = null;
public changes():Changes {
if ( this.changeSets === null ) {
this.changeSets = Changes.factory();
}
return this.changeSets;
}
public getStructure():Info {
if ( this.structure === null ) {
var struct:Info = { ux: [], ix: [], key: this.key };
_.each( {ix: this.indexes, ux: this.uniques}, function ( structure:any[], param?:string ):void {
struct[param] = _.map( structure, function ( value:any ) {
return _.isArray( value ) ? value : [value];
} );
} );
this.structure = struct;
}
return this.structure;
}
public structureId():string {
return JSON.stringify( { i: this.indexes, u: this.uniques } );
}
public getName():string {
return this.name;
}
}
export class Changes {
static factory():Changes {
return new Changes();
}
private items:ChangeSet[] = [];
public list():ChangeSet[] {
return this.items;
}
public add( name:string, cb:any ):Changes {
this.items.push( { name: name, callback: cb } );
return this;
}
}
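		// e.g. (illustrative): structure.changes()
		//     .add( 'add-email-index', ( db:IDBOpenDBRequest ):boolean => { /* perform upgrade */ return true; } )
		//     .add( 'drop-legacy-store', ( db:IDBOpenDBRequest ):boolean => true );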
export interface ChangeSet {
name:string;
callback:ChangeSetCallback;
}
export interface ChangeSetCallback {
( database:IDBOpenDBRequest, oldVersion?:number, newVersion?:number ):boolean;
}
}
}<|fim▁end|> | |
<|file_name|>configs.py<|end_file_name|><|fim▁begin|># Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common configs for compatibility_lib and compatibility_server.
Note that a unit test exists for checking that the configs.py file in
compatibility_lib is the same as the configs.py file in compatibility_server.
The reason for this setup is that these modules need to be isolated from
each other, but there also needs to be consistency in the objects and data
in this file since they exist in the same workflow.
Steps for updating the package list / whitelist:
1. Make sure to update both lists when appropriate (the package has been
   released to PyPI and the github repo exists)
2. Skip the dashboard tests and build when adding any new packages to
either list
3. Release a new version of compatibility lib
4. Redeploy the badge and compatibility servers
5. Unskip the dashboard tests and build
"""
def _format_url(repo_name, setuppy_path=''):
url = 'git+git://github.com/{}.git'.format(repo_name)
if setuppy_path != '':
url = '{}#subdirectory={}'.format(url, setuppy_path)
return url
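# For example, _format_url('googleapis/google-cloud-python', 'asset') returns
# 'git+git://github.com/googleapis/google-cloud-python.git#subdirectory=asset'.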
# IGNORED_DEPENDENCIES are not direct dependencies for many packages and are
# not installed via pip, resulting in unresolvable high priority warnings.
IGNORED_DEPENDENCIES = [
'pip',
'setuptools',
'wheel',
'virtualenv',
]
# If updating this list, make sure to update the whitelist as well with the
# appropriate github repo if one exists.
PKG_LIST = [
'google-api-core',
'google-api-python-client',
'google-auth',
'google-cloud-asset',
'google-cloud-automl',
'google-cloud-bigquery',
'google-cloud-bigquery-datatransfer',
'google-cloud-bigquery-storage',
'google-cloud-bigtable',
'google-cloud-container',
'google-cloud-core',
'google-cloud-datacatalog',
'google-cloud-datalabeling',
'google-cloud-dataproc',
'google-cloud-datastore',
'google-cloud-dlp',
'google-cloud-dns',
'google-cloud-error-reporting',
'google-cloud-firestore',
'google-cloud-iam',
'google-cloud-iot',
# 'google-cloud-irm', # unreleased
'google-cloud-kms',
'google-cloud-language',
'google-cloud-logging',
'google-cloud-monitoring',
'google-cloud-os-login',
# 'google-cloud-phishing-protection', # unreleased
'google-cloud-pubsub',
'google-cloud-redis',
'google-cloud-resource-manager',
'google-cloud-runtimeconfig',
'google-cloud-scheduler',
'google-cloud-securitycenter',
'google-cloud-spanner',
'google-cloud-speech',
'google-cloud-storage',
'google-cloud-talent',
'google-cloud-tasks',
'google-cloud-texttospeech',
'google-cloud-trace',
'google-cloud-translate',
'google-cloud-videointelligence',
'google-cloud-vision',
'google-cloud-webrisk',
'google-cloud-websecurityscanner',
'google-resumable-media',
'apache-beam[gcp]',
'google-apitools',
'googleapis-common-protos',
'grpc-google-iam-v1',
'grpcio',
'opencensus',
'protobuf',
'protorpc',
'tensorboard',
'tensorflow',
'gcloud',
'compatibility-lib',
]
WHITELIST_PKGS = PKG_LIST
# WHITELIST_URLS maps a github url to its associated pypi package name. This is
# used for sanitizing input packages and making sure we don't run random pypi
# or github packages.
# If updating this list, make sure to update the `PKG_LIST` with the
# appropriate pypi package if one has been released.
WHITELIST_URLS = {
_format_url('googleapis/google-cloud-python', 'asset'):
'google-cloud-asset',
_format_url('googleapis/google-cloud-python', 'automl'):
'google-cloud-automl',
_format_url('googleapis/google-cloud-python', 'datacatalog'):
'google-cloud-datacatalog',
_format_url('googleapis/google-cloud-python', 'datalabeling'):
'google-cloud-datalabeling',
_format_url('googleapis/google-cloud-python', 'dataproc'):
'google-cloud-dataproc',
_format_url('googleapis/google-cloud-python', 'dlp'):
'google-cloud-dlp',
_format_url('googleapis/google-cloud-python', 'iam'):
'google-cloud-iam',
_format_url('googleapis/google-cloud-python', 'iot'):
'google-cloud-iot',
# unreleased
_format_url('googleapis/google-cloud-python', 'irm'):
'google-cloud-irm',
_format_url('googleapis/google-cloud-python', 'kms'):
'google-cloud-kms',
_format_url('googleapis/python-ndb', ''):
'google-cloud-ndb',
_format_url('googleapis/google-cloud-python', 'oslogin'):
'google-cloud-os-login',
_format_url('googleapis/google-cloud-python', 'redis'):
'google-cloud-redis',
_format_url('googleapis/google-cloud-python', 'scheduler'):
'google-cloud-scheduler',
_format_url('googleapis/google-cloud-python', 'securitycenter'):
'google-cloud-securitycenter',
_format_url('googleapis/google-cloud-python', 'tasks'):
'google-cloud-tasks',
_format_url('googleapis/google-cloud-python', 'texttospeech'):
'google-cloud-texttospeech',
_format_url('googleapis/google-cloud-python', 'webrisk'):
'google-cloud-webrisk',
_format_url('googleapis/google-cloud-python', 'websecurityscanner'):
'google-cloud-websecurityscanner',
_format_url('googleapis/google-cloud-python', 'api_core'):
'google-api-core',
_format_url('googleapis/google-cloud-python', 'bigquery'):
'google-cloud-bigquery',
_format_url('googleapis/google-cloud-python', 'bigquery_datatransfer'):
'google-cloud-bigquery-datatransfer',
_format_url('googleapis/google-cloud-python', 'bigquery_storage'):
'google-cloud-bigquery-storage',
_format_url('googleapis/google-cloud-python', 'bigtable'):
'google-cloud-bigtable',
_format_url('googleapis/google-cloud-python', 'container'):
'google-cloud-container',
_format_url('googleapis/google-cloud-python', 'core'):
'google-cloud-core',
_format_url('googleapis/google-cloud-python', 'datastore'):
'google-cloud-datastore',
_format_url('googleapis/google-cloud-python', 'dns'): 'google-cloud-dns',
_format_url('googleapis/google-cloud-python', 'error_reporting'):
'google-cloud-error-reporting',
_format_url('googleapis/google-cloud-python', 'firestore'):
'google-cloud-firestore',
_format_url('googleapis/google-cloud-python', 'language'):
'google-cloud-language',
_format_url('googleapis/google-cloud-python', 'logging'):
'google-cloud-logging',
_format_url('googleapis/google-cloud-python', 'monitoring'):
'google-cloud-monitoring',
# unreleased
_format_url('googleapis/google-cloud-python', 'phishingprotection'):
'google-cloud-phishing-protection',
_format_url('googleapis/google-cloud-python', 'pubsub'):
'google-cloud-pubsub',
_format_url('googleapis/google-cloud-python', 'resource_manager'):
'google-cloud-resource-manager',
_format_url('googleapis/google-cloud-python', 'runtimeconfig'):
'google-cloud-runtimeconfig',
_format_url('googleapis/google-cloud-python', 'spanner'):
'google-cloud-spanner',
_format_url('googleapis/google-cloud-python', 'speech'):
'google-cloud-speech',
_format_url('googleapis/google-cloud-python', 'storage'):
'google-cloud-storage',
_format_url('googleapis/google-cloud-python', 'talent'):
'google-cloud-talent',
_format_url('googleapis/google-cloud-python', 'trace'):
'google-cloud-trace',
_format_url('googleapis/google-cloud-python', 'translate'):
'google-cloud-translate',
_format_url('googleapis/google-cloud-python', 'videointelligence'):
'google-cloud-videointelligence',
_format_url('googleapis/google-cloud-python', 'vision'):
'google-cloud-vision',
_format_url('googleapis/google-api-python-client'):
'google-api-python-client',
_format_url('googleapis/google-auth-library-python'): 'google-auth',
_format_url('GoogleCloudPlatform/google-resumable-media-python'):
'google-resumable-media',
_format_url('apache/beam', 'sdks/python'): 'apache-beam[gcp]',
_format_url('google/apitools'): 'google-apitools',
_format_url('census-instrumentation/opencensus-python'): 'opencensus',
_format_url('google/protorpc'): 'protorpc',
_format_url('tensorflow/tensorflow', 'tensorflow/tools/pip_package'):
'tensorflow',
_format_url('GoogleCloudPlatform/cloud-opensource-python',
'compatibility_lib'): 'compatibility-lib',
# TODO: The following projects do not use setup.py
# googleapis-common-protos
# grpc-google-iam-v1
# grpcio
# protobuf
# tensorboard - not sure what the build process is
# _format_url('tensorflow/tensorboard', 'tensorboard/pip_package'):
# 'tensorboard',
}
# TODO: Find top 30 packages by download count in BigQuery table.
THIRD_PARTY_PACKAGE_LIST = [
'requests',
'flask',<|fim▁hole|>PKG_PY_VERSION_NOT_SUPPORTED = {
2: ['tensorflow', ],
3: ['apache-beam[gcp]', 'gsutil', ],
}<|fim▁end|> | 'django',
]
|
<|file_name|>config.py<|end_file_name|><|fim▁begin|># Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from oslo import messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common.db import options as db_options
from neutron.openstack.common import log as logging
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),<|fim▁hole|> cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.StrOpt('nova_url',
default='http://127.0.0.1:8774/v2',
help=_('URL for connection to nova')),
cfg.StrOpt('nova_admin_username',
help=_('Username for connecting to nova in admin context')),
cfg.StrOpt('nova_admin_password',
help=_('Password for connection to nova in admin context'),
secret=True),
cfg.StrOpt('nova_admin_tenant_id',
help=_('The uuid of the admin nova tenant')),
cfg.StrOpt('nova_admin_auth_url',
default='http://localhost:5000/v2.0',
help=_('Authorization URL for connecting to nova in admin '
'context')),
cfg.StrOpt('nova_ca_certificates_file',
help=_('CA file for novaclient to verify server certificates')),
cfg.BoolOpt('nova_api_insecure', default=False,
help=_("If True, ignore any SSL validation issues")),
cfg.StrOpt('nova_region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def init(args, **kwargs):
cfg.CONF(args=args, project='neutron',
version='%%prog %s' % version.version_info.release_string(),
**kwargs)
# FIXME(ihrachys): if import is put in global, circular import
# failure occurs
from neutron.common import rpc as n_rpc
n_rpc.init(cfg.CONF)
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging(conf):
"""Sets up the logging options for a log with supplied name.
:param conf: a cfg.ConfOpts object
"""
product_name = "neutron"
logging.setup(product_name)
LOG.info(_("Logging enabled!"))
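# A typical service bootstrap (illustrative) wires these helpers together:
#
#   config.init(sys.argv[1:])
#   config.setup_logging(cfg.CONF)
#   app = config.load_paste_app('neutron')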
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app<|fim▁end|> | cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")), |
<|file_name|>to_bits.rs<|end_file_name|><|fim▁begin|>use itertools::Itertools;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::bench::bucketers::{signed_bit_bucketer, unsigned_bit_bucketer};
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
use malachite_base_test_util::num::logic::bit_convertible::{
to_bits_asc_alt, to_bits_asc_signed_naive, to_bits_asc_unsigned_naive, to_bits_desc_alt,
to_bits_desc_signed_naive, to_bits_desc_unsigned_naive,
};
use malachite_base_test_util::runner::Runner;
pub(crate) fn register(runner: &mut Runner) {
register_unsigned_demos!(runner, demo_to_bits_asc_unsigned);
register_signed_demos!(runner, demo_to_bits_asc_signed);
register_unsigned_demos!(runner, demo_to_bits_desc_unsigned);
register_signed_demos!(runner, demo_to_bits_desc_signed);
register_unsigned_benches!(runner, benchmark_to_bits_asc_algorithms_unsigned);
register_signed_benches!(runner, benchmark_to_bits_asc_algorithms_signed);
register_unsigned_benches!(runner, benchmark_to_bits_asc_evaluation_strategy_unsigned);
register_signed_benches!(runner, benchmark_to_bits_asc_evaluation_strategy_signed);
register_unsigned_benches!(runner, benchmark_to_bits_desc_algorithms_unsigned);
register_signed_benches!(runner, benchmark_to_bits_desc_algorithms_signed);
register_unsigned_benches!(runner, benchmark_to_bits_desc_evaluation_strategy_unsigned);
register_signed_benches!(runner, benchmark_to_bits_desc_evaluation_strategy_signed);
}
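// For orientation: to_bits_asc yields bits least-significant first and to_bits_desc
// most-significant first, so, illustratively, 6u8.to_bits_asc() == vec![false, true, true]
// and 6u8.to_bits_desc() == vec![true, true, false].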
fn demo_to_bits_asc_unsigned<T: PrimitiveUnsigned>(gm: GenMode, config: GenConfig, limit: usize) {
for u in unsigned_gen::<T>().get(gm, &config).take(limit) {
println!("{}.to_bits_asc() = {:?}", u, u.to_bits_asc());
}
}
fn demo_to_bits_asc_signed<T: PrimitiveSigned>(gm: GenMode, config: GenConfig, limit: usize) {
for i in signed_gen::<T>().get(gm, &config).take(limit) {
println!("{}.to_bits_asc() = {:?}", i, i.to_bits_asc());
}
}
fn demo_to_bits_desc_unsigned<T: PrimitiveUnsigned>(gm: GenMode, config: GenConfig, limit: usize) {
for u in unsigned_gen::<T>().get(gm, &config).take(limit) {
println!("{}.to_bits_desc() = {:?}", u, u.to_bits_desc());
}
}
fn demo_to_bits_desc_signed<T: PrimitiveSigned>(gm: GenMode, config: GenConfig, limit: usize) {
for i in signed_gen::<T>().get(gm, &config).take(limit) {
println!("{}.to_bits_desc() = {:?}", i, i.to_bits_desc());
}
}
fn benchmark_to_bits_asc_algorithms_unsigned<T: PrimitiveUnsigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_asc()", T::NAME),
BenchmarkType::Algorithms,<|fim▁hole|> unsigned_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&unsigned_bit_bucketer(),
&mut [
("Malachite", &mut |u| no_out!(u.to_bits_asc())),
("alt", &mut |u| no_out!(to_bits_asc_alt(&u))),
("naive", &mut |u| no_out!(to_bits_asc_unsigned_naive(u))),
],
);
}
fn benchmark_to_bits_asc_algorithms_signed<T: PrimitiveSigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_asc()", T::NAME),
BenchmarkType::Algorithms,
signed_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&signed_bit_bucketer(),
&mut [
("Malachite", &mut |i| no_out!(i.to_bits_asc())),
("alt", &mut |i| no_out!(to_bits_asc_alt(&i))),
("naive", &mut |i| no_out!(to_bits_asc_signed_naive(i))),
],
);
}
#[allow(unused_must_use)]
fn benchmark_to_bits_asc_evaluation_strategy_unsigned<T: PrimitiveUnsigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_asc()", T::NAME),
BenchmarkType::EvaluationStrategy,
unsigned_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&unsigned_bit_bucketer(),
&mut [
(&format!("{}.to_bits_asc()", T::NAME), &mut |n| {
no_out!(n.to_bits_asc())
}),
(&format!("{}.bits().collect_vec()", T::NAME), &mut |n| {
no_out!(n.bits().collect_vec())
}),
],
);
}
#[allow(unused_must_use)]
fn benchmark_to_bits_asc_evaluation_strategy_signed<T: PrimitiveSigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_asc()", T::NAME),
BenchmarkType::EvaluationStrategy,
signed_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&signed_bit_bucketer(),
&mut [
(&format!("{}.to_bits_asc()", T::NAME), &mut |n| {
no_out!(n.to_bits_asc())
}),
(&format!("{}.bits().collect_vec()", T::NAME), &mut |n| {
no_out!(n.bits().collect_vec())
}),
],
);
}
fn benchmark_to_bits_desc_algorithms_unsigned<T: PrimitiveUnsigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_desc()", T::NAME),
BenchmarkType::Algorithms,
unsigned_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&unsigned_bit_bucketer(),
&mut [
("Malachite", &mut |u| no_out!(u.to_bits_desc())),
("alt", &mut |u| no_out!(to_bits_desc_alt(&u))),
("naive", &mut |u| no_out!(to_bits_desc_unsigned_naive(u))),
],
);
}
fn benchmark_to_bits_desc_algorithms_signed<T: PrimitiveSigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_desc()", T::NAME),
BenchmarkType::Algorithms,
signed_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&signed_bit_bucketer(),
&mut [
("Malachite", &mut |i| no_out!(i.to_bits_desc())),
("alt", &mut |i| no_out!(to_bits_desc_alt(&i))),
("naive", &mut |i| no_out!(to_bits_desc_signed_naive(i))),
],
);
}
#[allow(unused_must_use)]
fn benchmark_to_bits_desc_evaluation_strategy_unsigned<T: PrimitiveUnsigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_desc()", T::NAME),
BenchmarkType::EvaluationStrategy,
unsigned_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&unsigned_bit_bucketer(),
&mut [
(&format!("{}.to_bits_desc()", T::NAME), &mut |n| {
no_out!(n.to_bits_desc())
}),
(
&format!("{}.bits().rev().collect_vec()", T::NAME),
&mut |n| no_out!(n.bits().rev().collect_vec()),
),
],
);
}
#[allow(unused_must_use)]
fn benchmark_to_bits_desc_evaluation_strategy_signed<T: PrimitiveSigned>(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
&format!("{}.to_bits_desc()", T::NAME),
BenchmarkType::EvaluationStrategy,
signed_gen::<T>().get(gm, &config),
gm.name(),
limit,
file_name,
&signed_bit_bucketer(),
&mut [
(&format!("{}.to_bits_desc()", T::NAME), &mut |n| {
no_out!(n.to_bits_desc())
}),
(
&format!("{}.bits().rev().collect_vec()", T::NAME),
&mut |n| no_out!(n.bits().rev().collect_vec()),
),
],
);
}<|fim▁end|> | |
<|file_name|>sint.rs<|end_file_name|><|fim▁begin|>use super::{write_marker, RmpWrite};
use crate::encode::{
write_pfix, write_u16, write_u32, write_u64, write_u8, ValueWriteError,
};
use crate::Marker;
/// Encodes and attempts to write a negative small integer value as a negative fixnum into the
/// given write.
///
/// According to the MessagePack specification, a negative fixed integer value is represented using
/// a single byte in the `[0xe0; 0xff]` range inclusive, prepended with a special marker mask.
///
/// The function is **strict** with the input arguments - it is the user's responsibility to check
/// if the value fits in the described range, otherwise it will panic.
///
/// If you are not sure if the value fits in the given range use `write_sint` instead, which
/// automatically selects the most compact integer representation.
///
/// # Errors
///
/// This function will return an error on any I/O failure that occurs while writing the
/// negative fixnum marker.
///
/// # Panics
///
/// Panics if `val` does not fit in `[-32; 0)` range.
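///
/// # Examples
///
/// ```
/// // The single-byte negative fixnum encoding: -18 as an i8 is 0xee.
/// let mut buf = [0x00];
/// rmp::encode::write_nfix(&mut &mut buf[..], -18).ok().unwrap();
/// assert_eq!([0xee], buf);
/// ```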
#[inline]
#[track_caller]
pub fn write_nfix<W: RmpWrite>(wr: &mut W, val: i8) -> Result<(), W::Error> {
assert!(-32 <= val && val < 0);
write_marker(wr, Marker::FixNeg(val)).map_err(|e| e.0)?;
Ok(())
}
/// Encodes and attempts to write an `i8` value as a 2-byte sequence into the given write.
///
/// The first byte becomes the marker and the second one will represent the data itself.
///
/// Note that this function will encode the given value in a 2-byte sequence no matter what, even if
/// the value can be represented using a single byte as a fixnum. Also note that the first byte will
/// always be the i8 marker (`0xd0`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error that occurs while writing either the
/// marker or the data.
///
/// # Examples
///
/// ```
/// let mut buf = [0x00, 0x00];
///
/// rmp::encode::write_i8(&mut &mut buf[..], 42).ok().unwrap();
/// assert_eq!([0xd0, 0x2a], buf);
///
/// // Note that -18 can be represented simply as `[0xee]`, but the function emits a 2-byte sequence.
/// rmp::encode::write_i8(&mut &mut buf[..], -18).ok().unwrap();
/// assert_eq!([0xd0, 0xee], buf);
/// ```
pub fn write_i8<W: RmpWrite>(wr: &mut W, val: i8) -> Result<(), ValueWriteError<W::Error>> {
write_marker(wr, Marker::I8)?;
wr.write_data_i8(val)?;
Ok(())
}
/// Encodes and attempts to write an `i16` value as a 3-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note that this function will encode the given value in a 3-byte sequence no matter what, even if
/// the value can be represented using a single byte as a fixnum. Also note that the first byte will
/// always be the i16 marker (`0xd1`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error that occurs while writing either the
/// marker or the data.
pub fn write_i16<W: RmpWrite>(wr: &mut W, val: i16) -> Result<(), ValueWriteError<W::Error>> {
write_marker(wr, Marker::I16)?;<|fim▁hole|> Ok(())
}
/// Encodes and attempts to write an `i32` value as a 5-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note that this function will encode the given value in a 5-byte sequence no matter what, even if
/// the value can be represented using a single byte as a fixnum. Also note that the first byte will
/// always be the i32 marker (`0xd2`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error that occurs while writing either the
/// marker or the data.
pub fn write_i32<W: RmpWrite>(wr: &mut W, val: i32) -> Result<(), ValueWriteError<W::Error>> {
write_marker(wr, Marker::I32)?;
wr.write_data_i32(val)?;
Ok(())
}
/// Encodes and attempts to write an `i64` value as a 9-byte sequence into the given write.
///
/// The first byte becomes the marker and the others will represent the data itself.
///
/// Note that this function will encode the given value in a 9-byte sequence no matter what, even if
/// the value can be represented using a single byte as a fixnum. Also note that the first byte will
/// always be the i64 marker (`0xd3`).
///
/// If you need to fit the given buffer efficiently use `write_sint` instead, which automatically
/// selects the appropriate integer representation.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error that occurs while writing either the
/// marker or the data.
pub fn write_i64<W: RmpWrite>(wr: &mut W, val: i64) -> Result<(), ValueWriteError<W::Error>> {
write_marker(wr, Marker::I64)?;
wr.write_data_i64(val)?;
Ok(())
}
/// Encodes and attempts to write an `i64` value into the given write using the most efficient
/// representation, returning the marker used.
///
/// This function obeys the MessagePack specification, which recommends that the serializer use
/// the format which represents the data in the smallest number of bytes, with the exception of
/// sized/unsized types.
///
/// Note, that the function will **always** use signed integer representation even if the value can
/// be more efficiently represented using unsigned integer encoding.
///
/// The first byte becomes the marker and the others (if present, up to 8) will represent the data
/// itself.
///
/// # Errors
///
/// This function will return `ValueWriteError` on any I/O error that occurs while writing either the
/// marker or the data.
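///
/// # Examples
///
/// A minimal sketch (not from the original docs) of the compact-format selection:
///
/// ```
/// let mut buf = [0x00];
/// // -18 fits in a negative fixnum, so only a single byte is emitted.
/// rmp::encode::write_sint(&mut &mut buf[..], -18).ok().unwrap();
/// assert_eq!([0xee], buf);
/// ```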
pub fn write_sint<W: RmpWrite>(wr: &mut W, val: i64) -> Result<Marker, ValueWriteError<W::Error>> {
match val {
val if -32 <= val && val < 0 => {
write_nfix(wr, val as i8)
.and(Ok(Marker::FixNeg(val as i8)))
.map_err(ValueWriteError::InvalidMarkerWrite)
}
val if -128 <= val && val < -32 => write_i8(wr, val as i8).and(Ok(Marker::I8)),
val if -32768 <= val && val < -128 => write_i16(wr, val as i16).and(Ok(Marker::I16)),
val if -2147483648 <= val && val < -32768 => write_i32(wr, val as i32).and(Ok(Marker::I32)),
val if val < -2147483648 => write_i64(wr, val).and(Ok(Marker::I64)),
val if 0 <= val && val < 128 => {
write_pfix(wr, val as u8)
.and(Ok(Marker::FixPos(val as u8)))
.map_err(ValueWriteError::InvalidMarkerWrite)
}
val if val < 256 => write_u8(wr, val as u8).and(Ok(Marker::U8)),
val if val < 65536 => write_u16(wr, val as u16).and(Ok(Marker::U16)),
val if val < 4294967296 => write_u32(wr, val as u32).and(Ok(Marker::U32)),
val => write_u64(wr, val as u64).and(Ok(Marker::U64)),
}
}<|fim▁end|> | wr.write_data_i16(val)?; |
<|file_name|>CCParser.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3
# Copyright (C) 2014-2016, 2018 Rafael Senties Martinelli
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import configparser
import os
import traceback
__version__ = '18.10.16'
class CCParser(object):
def __init__(self, ini_path='', section='', debug=False):
"""
        To init CCParser you can enter a path
        and a section. If you don't know them yet,
        you can leave them empty.
        If debug is set to True, all exceptions
        will print their tracebacks.
"""
self._debug = debug
self._config = configparser.ConfigParser()
if ini_path != '':
self.set_configuration_path(ini_path)
if section != '':
self.set_section(section)
self.__default_bool = False
self.__default_string = ''
self.__default_int = 0
self.__default_float = 0.0
self.__default_list = []
self._accepted_true_bool = ('true', 'yes') # must be lower case
self._accepted_false_bool = ('false', 'no') # must be lower case
def __str__(self):
return '''
CCParser instance: {}
Configuration Path: {}
Section: {}
Default boolean: {}
Default float: {}
Default integer: {}
Default string: {}
Default list: {}
'''.format( repr(self),
self.get_configuration_path(),
self.get_section(),
self.get_default_bool(),
self.get_default_float(),
self.get_default_int(),
self.get_default_str(),
self.get_default_list())
def check_value(self, value):
"""
        return False if the value does not exist,
        return True if the value exists
"""
if not os.path.exists(self.ini_path):
return False
else:
try:
self._config.read(self.ini_path)
except Exception:
print("CCParser Warning: reading damaged file or file without section")
print(traceback.format_exc())
print()
return False
if not self._config.has_section(self.__section):
return False
elif self._config.has_option(self.__section, value):
return True
else:
return False
def get_bool(self, value):
"""
If the value exists, return the boolean
corresponding to the string. If it does
        not exist, or the value cannot be converted
to a boolean, return the default boolean.
"""
if self.check_value(value):
val = self._config.get(self.__section, value).lower()
if val in self._accepted_false_bool:
return False
elif val in self._accepted_true_bool:
return True
else:
return self.__default_bool
else:
return self.__default_bool
def get_float(self, value):
"""
If the value exists, return the float
corresponding to the string. If it does
        not exist, or the value cannot be converted
to a float, return the default float.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = float(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return self.__default_float
else:
return self.__default_float
def get_int(self, value):
"""
If the value exists, return the integer
corresponding to the string. If it does
        not exist, or the value cannot be converted
        to an integer, return the default integer.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = int(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return self.__default_int
else:
return self.__default_int
def get_list(self, value):
"""
        If the value exists, return the list obtained
        by splitting the stored string on "|". If it
        does not exist, or the value cannot be split,
        return the default list.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = val.split("|")
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return self.__default_list
else:
return self.__default_list
def get_str(self, value):
"""
If the value exists, return the string,
        otherwise return the default string.
"""
if self.check_value(value):
return self._config.get(self.__section, value)
else:
return self.__default_string
def get_bool_defval(self, value, default):
"""
If the value exists, return the boolean
corresponding to the string. If it does
        not exist, or the value cannot be converted
        to a boolean, return the second argument.
"""
if self.check_value(value):
val = self._config.get(self.__section, value).lower()
if val in self._accepted_false_bool:
return False
elif val in self._accepted_true_bool:
return True
else:
return default
else:
return default
def get_float_defval(self, value, default):
"""
If the value exists, return the float
corresponding to the string. If it does
        not exist, or the value cannot be converted
        to a float, return the second argument.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = float(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return default
else:
return default
def get_int_defval(self, value, default):
"""
If the value exists, return the integer
corresponding to the string. If it does
        not exist, or the value cannot be converted
        to an integer, return the second argument.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = int(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return default
else:
return default
def get_str_defval(self, value, default):
"""
If the value exists, return the string,
        if it does not exist, return the
second argument.
"""
if self.check_value(value):
return self._config.get(self.__section, value)
else:
return default
def set_configuration_path(self, ini_path):
"""
Set the path to the configuration file.
"""
if isinstance(ini_path, str):
self.ini_path = ini_path
if not os.path.exists(ini_path) and self._debug:
print("CCParser Warning: the path to the configuration file does not exists\n")
else:
print("CCParser Warning: The path is not valid.\n")
self.ini_path = ''
def set_section(self, section):
"""
Set the section to check for values.
"""
section = str(section)
self.__section = section
def set_default_float(self, value):
"""
Set the default float to return when
        a value does not exist. By default
it returns 0.0
"""
self.__default_float = value
def set_default_string(self, value):
"""
Set the default string to return when
        a value does not exist. By default
it returns an empty string.
"""
self.__default_string = value
def set_default_bool(self, value):
"""
Set the default boolean to return when
        a value does not exist. By default
        it returns False
"""
self.__default_bool = value
def set_default_int(self, value):
"""
Set the default integer to return when
        a value does not exist. By default
it returns 0
"""
self.__default_int = value
def set_default_list(self, value):
"""
        Set the default list to return when
        a value does not exist. By default
        it returns an empty list.
"""
self.__default_list = value
def write(self, value_name, value):
"""
Write the value name and its value.
        If the config file or the directories
        in its path do not exist, they
will be created.
"""
if self.ini_path != '' and isinstance(self.ini_path, str):<|fim▁hole|> os.makedirs(os.path.dirname(self.ini_path))
if not os.path.exists(self.ini_path):
open(self.ini_path, 'wt').close()
try:
self._config.read(self.ini_path)
except Exception:
print("CCParser Warning: reading damaged file or file without section")
print(traceback.format_exc())
print()
return False
if not self._config.has_section(self.__section):
self._config.add_section(self.__section)
if isinstance(value, list) or isinstance(value, tuple):
                values = '|'.join(str(item) for item in value)
self._config.set(self.__section, value_name, values)
else:
self._config.set(self.__section, value_name, str(value))
with open(self.ini_path, 'w') as f:
self._config.write(f)
else:
print(
"CCParser Error: Trying to write the configuration without an ini path.")
print("Configuration Path: " + str(self.get_configuration_path()))
print()
def get_default_bool(self):
return self.__default_bool
def get_default_float(self):
return self.__default_float
def get_default_str(self):
return self.__default_string
def get_default_int(self):
return self.__default_int
def get_default_list(self):
return self.__default_list
def get_section(self):
return self.__section
def get_configuration_path(self):
return self.ini_path
if __name__ == '__main__':
def test(path):
if os.path.exists(path):
os.remove(path)
cp = CCParser(path, 'test')
print('section:', cp.get_section())
cp.write('bool', False)
print(cp.get_bool('bool'))
cp.write('bool', True)
print(cp.get_bool('bool'))
cp.write('string1', 'this is a test')
print(cp.get_str('string1'))
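        # Added sketch (beyond the original checks): round-trip a list, which
        # write() stores pipe-separated and get_list() splits back apart.
        cp.write('list1', ['red', 'green', 'blue'])
        print(cp.get_list('list1'))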
print(cp)
test('/home/rsm/Desktop/test.ini') # unexisting file<|fim▁end|> |
if not os.path.exists(os.path.dirname(self.ini_path)): |
<|file_name|>sidenav.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>import { MdDrawer, MdDrawerContainer } from './drawer';
export declare class MdSidenav extends MdDrawer {
}
export declare class MdSidenavContainer extends MdDrawerContainer {
_drawers: any;
}<|fim▁end|> | |
<|file_name|>AutoConfigureJooq.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.autoconfigure.jooq;<|fim▁hole|>import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.springframework.boot.autoconfigure.ImportAutoConfiguration;
/**
* {@link ImportAutoConfiguration Auto-configuration imports} for typical jOOQ tests. Most
* tests should consider using {@link JooqTest @JooqTest} rather than using this
* annotation directly.
*
* @author Michael Simons
* @since 2.0.0
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Inherited
@ImportAutoConfiguration
public @interface AutoConfigureJooq {
}<|fim▁end|> |
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited; |
<|file_name|>navigation-controls.js<|end_file_name|><|fim▁begin|>import Map from '../src/ol/Map.js';
import View from '../src/ol/View.js';
import {defaults as defaultControls, ZoomToExtent} from '../src/ol/control.js';
import TileLayer from '../src/ol/layer/Tile.js';
import OSM from '../src/ol/source/OSM.js';
const map = new Map({
controls: defaultControls().extend([
new ZoomToExtent({
extent: [<|fim▁hole|> 848966.9639063801, 5936863.986909639
]
})
]),
layers: [
new TileLayer({
source: new OSM()
})
],
target: 'map',
view: new View({
center: [0, 0],
zoom: 2
})
});<|fim▁end|> | 813079.7791264898, 5929220.284081122, |
<|file_name|>request.py<|end_file_name|><|fim▁begin|>import asyncio
import email.utils
import json
import sys
from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from urllib.parse import parse_qs, unquote, urlunparse
from httptools import parse_url
from sanic.exceptions import InvalidUsage
from sanic.log import error_logger, logger
try:
from ujson import loads as json_loads
except ImportError:
if sys.version_info[:2] == (3, 5):
def json_loads(data):
# on Python 3.5 json.loads only supports str not bytes
return json.loads(data.decode())
else:
json_loads = json.loads
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
class RequestParameters(dict):
"""Hosts a dict with lists as values where get returns the first
value of the list and getlist returns the whole shebang
"""
def get(self, name, default=None):
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0]
def getlist(self, name, default=None):
"""Return the entire list"""
return super().get(name, default)
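# Illustrative sketch (not executed here): given
# RequestParameters({'tag': ['a', 'b']}), get('tag') returns 'a' while
# getlist('tag') returns ['a', 'b'].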
class StreamBuffer:
def __init__(self, buffer_size=100):
self._queue = asyncio.Queue(buffer_size)
async def read(self):
""" Stop reading when gets None """
payload = await self._queue.get()
self._queue.task_done()
return payload
async def put(self, payload):
await self._queue.put(payload)
def is_full(self):
return self._queue.full()
class Request(dict):
"""Properties of an HTTP request such as URL, headers, etc."""
__slots__ = (
"__weakref__",
"_cookies",
"_ip",
"_parsed_url",
"_port",
"_remote_addr",
"_socket",
"app",
"body",
"endpoint",
"headers",
"method",
"parsed_args",
"parsed_files",
"parsed_form",
"parsed_json",<|fim▁hole|> "transport",
"uri_template",
"version",
)
def __init__(self, url_bytes, headers, version, method, transport):
self.raw_url = url_bytes
# TODO: Content-Encoding detection
self._parsed_url = parse_url(url_bytes)
self.app = None
self.headers = headers
self.version = version
self.method = method
self.transport = transport
# Init but do not inhale
self.body_init()
self.parsed_json = None
self.parsed_form = None
self.parsed_files = None
self.parsed_args = None
self.uri_template = None
self._cookies = None
self.stream = None
self.endpoint = None
def __repr__(self):
return "<{0}: {1} {2}>".format(
self.__class__.__name__, self.method, self.path
)
def __bool__(self):
if self.transport:
return True
return False
def body_init(self):
self.body = []
def body_push(self, data):
self.body.append(data)
def body_finish(self):
self.body = b"".join(self.body)
@property
def json(self):
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
@property
def token(self):
"""Attempt to return the auth header token.
:return: token related to request
"""
prefixes = ("Bearer", "Token")
auth_header = self.headers.get("Authorization")
if auth_header is not None:
for prefix in prefixes:
if prefix in auth_header:
return auth_header.partition(prefix)[-1].strip()
return auth_header
@property
def form(self):
if self.parsed_form is None:
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.get(
"Content-Type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(self.body.decode("utf-8"))
)
elif content_type == "multipart/form-data":
# TODO: Stream this instead of reading to/from memory
boundary = parameters["boundary"].encode("utf-8")
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def files(self):
if self.parsed_files is None:
self.form # compute form to get files
return self.parsed_files
@property
def args(self):
if self.parsed_args is None:
if self.query_string:
self.parsed_args = RequestParameters(
parse_qs(self.query_string)
)
else:
self.parsed_args = RequestParameters()
return self.parsed_args
@property
def raw_args(self):
return {k: v[0] for k, v in self.args.items()}
@property
def cookies(self):
if self._cookies is None:
cookie = self.headers.get("Cookie")
if cookie is not None:
cookies = SimpleCookie()
cookies.load(cookie)
self._cookies = {
name: cookie.value for name, cookie in cookies.items()
}
else:
self._cookies = {}
return self._cookies
@property
def ip(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._ip
@property
def port(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._port
@property
def socket(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._socket
def _get_address(self):
self._socket = self.transport.get_extra_info("peername") or (
None,
None,
)
self._ip = self._socket[0]
self._port = self._socket[1]
@property
def remote_addr(self):
"""Attempt to return the original client ip based on X-Forwarded-For.
:return: original client ip.
"""
if not hasattr(self, "_remote_addr"):
forwarded_for = self.headers.get("X-Forwarded-For", "").split(",")
remote_addrs = [
addr
for addr in [addr.strip() for addr in forwarded_for]
if addr
]
if len(remote_addrs) > 0:
self._remote_addr = remote_addrs[0]
else:
self._remote_addr = ""
return self._remote_addr
@property
def scheme(self):
if (
self.app.websocket_enabled
and self.headers.get("upgrade") == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self):
# it appears that httptools doesn't return the host
# so pull it from the headers
return self.headers.get("Host", "")
@property
def content_type(self):
return self.headers.get("Content-Type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self):
"""return matched info after resolving route"""
return self.app.router.get(self)[2]
@property
def path(self):
return self._parsed_url.path.decode("utf-8")
@property
def query_string(self):
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self):
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
File = namedtuple("File", ["type", "body", "name"])
def parse_multipart_form(body, boundary):
"""Parse a request body and returns fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters)
"""
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
content_type = "text/plain"
content_charset = "utf-8"
field_name = None
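        # line_index starts at 2 to skip the "\r\n" left at the start of each
        # part after splitting the body on the boundary.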
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b"\r\n", line_index)
form_line = form_part[line_index:line_end_index].decode("utf-8")
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(":")
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_header(
form_line[colon_index + 2 :]
)
if form_header_field == "content-disposition":
field_name = form_parameters.get("name")
file_name = form_parameters.get("filename")
# non-ASCII filenames in RFC2231, "filename*" format
if file_name is None and form_parameters.get("filename*"):
encoding, _, value = email.utils.decode_rfc2231(
form_parameters["filename*"]
)
file_name = unquote(value, encoding=encoding)
elif form_header_field == "content-type":
content_type = form_header_value
content_charset = form_parameters.get("charset", "utf-8")
if field_name:
post_data = form_part[line_index:-4]
if file_name is None:
value = post_data.decode(content_charset)
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
else:
form_file = File(
type=content_type, name=file_name, body=post_data
)
if field_name in files:
files[field_name].append(form_file)
else:
files[field_name] = [form_file]
else:
logger.debug(
"Form-data field does not have a 'name' parameter "
"in the Content-Disposition header"
)
return fields, files<|fim▁end|> | "raw_url",
"stream", |
<|file_name|>value_from_settings.py<|end_file_name|><|fim▁begin|>'''
/*******************************************************************************
*
* Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* AUTHORS: Louay Bassbouss ([email protected])
*
******************************************************************************/
'''
from django.template import TemplateSyntaxError, Node, Variable, Library
from django.conf import settings
register = Library()
# I found some tricks in URLNode and url from defaulttags.py:
# https://code.djangoproject.com/browser/django/trunk/django/template/defaulttags.py
@register.tag
def value_from_settings(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one " \
"argument (settings constant to retrieve)" % bits[0])
settingsvar = bits[1]
settingsvar = settingsvar[1:-1] if settingsvar[0] == '"' else settingsvar
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
raise TemplateSyntaxError("'value_from_settings' didn't recognise " \
"the arguments '%s'" % ", ".join(bits))
return ValueFromSettings(settingsvar, asvar)
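# Template usage sketch (inferred from the parsing above, not from upstream
# docs):
#
#     {% value_from_settings "MEDIA_URL" %}
#     {% value_from_settings "MEDIA_URL" as media_url %}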
<|fim▁hole|> self.asvar = asvar
def render(self, context):
        ret_val = getattr(settings, str(self.arg))
if self.asvar:
context[self.asvar] = ret_val
return ''
else:
return ret_val<|fim▁end|> | class ValueFromSettings(Node):
def __init__(self, settingsvar, asvar):
self.arg = Variable(settingsvar) |
<|file_name|>validate.js<|end_file_name|><|fim▁begin|>//>>built<|fim▁hole|><|fim▁end|> | define({invalidMessage:"Angivet v\u00e4rde \u00e4r inte giltigt.",missingMessage:"V\u00e4rdet kr\u00e4vs.",rangeMessage:"V\u00e4rdet ligger utanf\u00f6r intervallet."}); |
<|file_name|>boss_skeram.cpp<|end_file_name|><|fim▁begin|>/*
*
* Copyright (C) 2011-2013 ArkCORE <http://www.arkania.net/>
*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
*
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "SpellScript.h"
#include "temple_of_ahnqiraj.h"
enum Yells
{
SAY_AGGRO = 0,
SAY_SLAY = 1,
SAY_SPLIT = 2,
SAY_DEATH = 3
};
enum Spells
{
SPELL_ARCANE_EXPLOSION = 26192,
SPELL_EARTH_SHOCK = 26194,
SPELL_TRUE_FULFILLMENT = 785,
SPELL_INITIALIZE_IMAGE = 3730,
SPELL_SUMMON_IMAGES = 747
};
enum Events
{
EVENT_ARCANE_EXPLOSION = 0,
EVENT_FULLFILMENT = 1,
EVENT_BLINK = 2,
EVENT_EARTH_SHOCK = 3
};
uint32 const BlinkSpells[3] = { 4801, 8195, 20449 };
<|fim▁hole|>class boss_skeram : public CreatureScript
{
public:
boss_skeram() : CreatureScript("boss_skeram") { }
struct boss_skeramAI : public BossAI
{
boss_skeramAI(Creature* creature) : BossAI(creature, DATA_SKERAM) { }
void Reset()
{
_flag = 0;
_hpct = 75.0f;
me->SetVisible(true);
}
void KilledUnit(Unit* /*victim*/)
{
Talk(SAY_SLAY);
}
void EnterEvadeMode()
{
ScriptedAI::EnterEvadeMode();
if (me->isSummon())
((TempSummon*)me)->UnSummon();
}
void JustSummoned(Creature* creature)
{
// Shift the boss and images (Get it? *Shift*?)
uint8 rand = 0;
if (_flag != 0)
{
while (_flag & (1 << rand))
rand = urand(0, 2);
DoCast(me, BlinkSpells[rand]);
_flag |= (1 << rand);
_flag |= (1 << 7);
}
while (_flag & (1 << rand))
rand = urand(0, 2);
creature->CastSpell(creature, BlinkSpells[rand]);
_flag |= (1 << rand);
if (_flag & (1 << 7))
_flag = 0;
if (Unit* Target = SelectTarget(SELECT_TARGET_RANDOM))
creature->AI()->AttackStart(Target);
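            // Images spawn with 10% of the boss's maximum health, rising to
            // 20% below half health and 50% below a quarter (computed next).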
float ImageHealthPct;
if (me->GetHealthPct() < 25.0f)
ImageHealthPct = 0.50f;
else if (me->GetHealthPct() < 50.0f)
ImageHealthPct = 0.20f;
else
ImageHealthPct = 0.10f;
creature->SetMaxHealth(me->GetMaxHealth() * ImageHealthPct);
creature->SetHealth(creature->GetMaxHealth() * (me->GetHealthPct() / 100.0f));
}
void JustDied(Unit* /*killer*/)
{
if (!me->isSummon())
Talk(SAY_DEATH);
else
me->RemoveCorpse();
}
void EnterCombat(Unit* /*who*/)
{
_EnterCombat();
events.Reset();
events.ScheduleEvent(EVENT_ARCANE_EXPLOSION, urand(6000, 12000));
events.ScheduleEvent(EVENT_FULLFILMENT, 15000);
events.ScheduleEvent(EVENT_BLINK, urand(30000, 45000));
events.ScheduleEvent(EVENT_EARTH_SHOCK, 2000);
Talk(SAY_AGGRO);
}
void UpdateAI(uint32 diff)
{
if (!UpdateVictim())
return;
events.Update(diff);
while (uint32 eventId = events.ExecuteEvent())
{
switch (eventId)
{
case EVENT_ARCANE_EXPLOSION:
DoCastAOE(SPELL_ARCANE_EXPLOSION, true);
events.ScheduleEvent(EVENT_ARCANE_EXPLOSION, urand(8000, 18000));
break;
case EVENT_FULLFILMENT:
/// @todo For some weird reason boss does not cast this
// Spell actually works, tested in duel
DoCast(SelectTarget(SELECT_TARGET_RANDOM, 0, 0.0f, true), SPELL_TRUE_FULFILLMENT, true);
events.ScheduleEvent(EVENT_FULLFILMENT, urand(20000, 30000));
break;
case EVENT_BLINK:
DoCast(me, BlinkSpells[urand(0, 2)]);
DoResetThreat();
me->SetVisible(true);
events.ScheduleEvent(EVENT_BLINK, urand(10000, 30000));
break;
case EVENT_EARTH_SHOCK:
DoCastVictim(SPELL_EARTH_SHOCK);
events.ScheduleEvent(EVENT_EARTH_SHOCK, 2000);
break;
}
}
if (!me->isSummon() && me->GetHealthPct() < _hpct)
{
DoCast(me, SPELL_SUMMON_IMAGES);
Talk(SAY_SPLIT);
_hpct -= 25.0f;
me->SetVisible(false);
events.RescheduleEvent(EVENT_BLINK, 2000);
}
if (me->IsWithinMeleeRange(me->getVictim()))
{
events.RescheduleEvent(EVENT_EARTH_SHOCK, 2000);
DoMeleeAttackIfReady();
}
}
private:
float _hpct;
uint8 _flag;
};
CreatureAI* GetAI(Creature* creature) const
{
return new boss_skeramAI(creature);
}
};
class PlayerOrPetCheck
{
public:
bool operator()(WorldObject* object) const
{
if (object->GetTypeId() != TYPEID_PLAYER)
if (!object->ToCreature()->isPet())
return true;
return false;
}
};
class spell_skeram_arcane_explosion : public SpellScriptLoader
{
public:
spell_skeram_arcane_explosion() : SpellScriptLoader("spell_skeram_arcane_explosion") { }
class spell_skeram_arcane_explosion_SpellScript : public SpellScript
{
PrepareSpellScript(spell_skeram_arcane_explosion_SpellScript);
void FilterTargets(std::list<WorldObject*>& targets)
{
targets.remove_if(PlayerOrPetCheck());
}
void Register()
{
OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_skeram_arcane_explosion_SpellScript::FilterTargets, EFFECT_0, TARGET_UNIT_SRC_AREA_ENEMY);
}
};
SpellScript* GetSpellScript() const
{
return new spell_skeram_arcane_explosion_SpellScript();
}
};
void AddSC_boss_skeram()
{
new boss_skeram();
new spell_skeram_arcane_explosion();
}<|fim▁end|> | |
<|file_name|>deriving-cmp-generic-enum.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded FIXME #15189
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum E<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E::E0;
let e11 = E::E1(1);
let e12 = E::E1(2);
let e21 = E::E2(1, 1);
let e22 = E::E2(1, 2);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
assert_eq!(*e1 != *e2, !eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}<|fim▁end|> | |
<|file_name|>norm_blr.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
from ast import literal_eval
try: # run as a package if installed
from pcntoolkit.model.bayesreg import BLR
from pcntoolkit.normative_model.norm_base import NormBase
from pcntoolkit.dataio import fileio
from pcntoolkit.util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
except ImportError:
pass
path = os.path.abspath(os.path.dirname(__file__))
if path not in sys.path:
sys.path.append(path)
del path
from model.bayesreg import BLR
from norm_base import NormBase
from dataio import fileio
from util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
class NormBLR(NormBase):
""" Normative modelling based on Bayesian Linear Regression
"""
def __init__(self, **kwargs):
X = kwargs.pop('X', None)
y = kwargs.pop('y', None)
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
self.optim_alg = kwargs.get('optimizer','powell')
if X is None:
            raise ValueError("Data matrix must be specified")
if len(X.shape) == 1:
self.D = 1
else:
self.D = X.shape[1]
# Parse model order
if kwargs is None:
model_order = 1
elif 'configparam' in kwargs: # deprecated syntax
model_order = kwargs.pop('configparam')
elif 'model_order' in kwargs:
model_order = kwargs.pop('model_order')
else:
model_order = 1
# Force a default model order and check datatype
if model_order is None:
model_order = 1
if type(model_order) is not int:
model_order = int(model_order)
# configure heteroskedastic noise
if 'varcovfile' in kwargs:
var_cov_file = kwargs.get('varcovfile')
if var_cov_file.endswith('.pkl'):
self.var_covariates = pd.read_pickle(var_cov_file)
else:
self.var_covariates = np.loadtxt(var_cov_file)
if len(self.var_covariates.shape) == 1:
self.var_covariates = self.var_covariates[:, np.newaxis]
n_beta = self.var_covariates.shape[1]
self.var_groups = None
elif 'vargroupfile' in kwargs:
# configure variance groups (e.g. site specific variance)
var_groups_file = kwargs.pop('vargroupfile')
if var_groups_file.endswith('.pkl'):
self.var_groups = pd.read_pickle(var_groups_file)
else:
self.var_groups = np.loadtxt(var_groups_file)
var_ids = set(self.var_groups)
var_ids = sorted(list(var_ids))
n_beta = len(var_ids)
else:
self.var_groups = None
self.var_covariates = None
n_beta = 1
# are we using ARD?
if 'use_ard' in kwargs:
self.use_ard = kwargs.pop('use_ard')
else:
self.use_ard = False
if self.use_ard:
n_alpha = self.D * model_order
else:
n_alpha = 1
# Configure warped likelihood
if 'warp' in kwargs:
warp_str = kwargs.pop('warp')
if warp_str is None:
self.warp = None
n_gamma = 0
else:
# set up warp
exec('self.warp =' + warp_str + '()')
n_gamma = self.warp.get_n_params()
else:
self.warp = None
n_gamma = 0
self._n_params = n_alpha + n_beta + n_gamma
self._model_order = model_order
print("configuring BLR ( order", model_order, ")")
if (theta is None) or (len(theta) != self._n_params):
print("Using default hyperparameters")
self.theta0 = np.zeros(self._n_params)
else:
self.theta0 = theta
self.theta = self.theta0
# initialise the BLR object if the required parameters are present
if (theta is not None) and (y is not None):
Phi = create_poly_basis(X, self._model_order)
self.blr = BLR(theta=theta, X=Phi, y=y,
warp=self.warp, **kwargs)
else:
self.blr = BLR(**kwargs)
@property
def n_params(self):
return self._n_params
@property
def neg_log_lik(self):
return self.blr.nlZ
def estimate(self, X, y, **kwargs):
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
# remove warp string to prevent it being passed to the blr object
kwargs.pop('warp',None)
Phi = create_poly_basis(X, self._model_order)
if len(y.shape) > 1:
y = y.ravel()
if theta is None:
theta = self.theta0
# (re-)initialize BLR object because parameters were not specified
self.blr = BLR(theta=theta, X=Phi, y=y,
var_groups=self.var_groups,
warp=self.warp, **kwargs)
self.theta = self.blr.estimate(theta, Phi, y,
var_covariates=self.var_covariates, **kwargs)
return self
def predict(self, Xs, X=None, y=None, **kwargs):
theta = self.theta # always use the estimated coefficients
# remove from kwargs to avoid downstream problems
kwargs.pop('theta', None)
Phis = create_poly_basis(Xs, self._model_order)
if X is None:
            Phi = None
else:
Phi = create_poly_basis(X, self._model_order)<|fim▁hole|> # process variance groups for the test data
if 'testvargroupfile' in kwargs:
var_groups_test_file = kwargs.pop('testvargroupfile')
if var_groups_test_file.endswith('.pkl'):
var_groups_te = pd.read_pickle(var_groups_test_file)
else:
var_groups_te = np.loadtxt(var_groups_test_file)
else:
var_groups_te = None
# process test variance covariates
if 'testvarcovfile' in kwargs:
var_cov_test_file = kwargs.get('testvarcovfile')
if var_cov_test_file.endswith('.pkl'):
var_cov_te = pd.read_pickle(var_cov_test_file)
else:
var_cov_te = np.loadtxt(var_cov_test_file)
else:
var_cov_te = None
# do we want to adjust the responses?
if 'adaptrespfile' in kwargs:
y_adapt = fileio.load(kwargs.pop('adaptrespfile'))
if len(y_adapt.shape) == 1:
y_adapt = y_adapt[:, np.newaxis]
else:
y_adapt = None
if 'adaptcovfile' in kwargs:
X_adapt = fileio.load(kwargs.pop('adaptcovfile'))
Phi_adapt = create_poly_basis(X_adapt, self._model_order)
else:
Phi_adapt = None
if 'adaptvargroupfile' in kwargs:
var_groups_adapt_file = kwargs.pop('adaptvargroupfile')
if var_groups_adapt_file.endswith('.pkl'):
var_groups_ad = pd.read_pickle(var_groups_adapt_file)
else:
var_groups_ad = np.loadtxt(var_groups_adapt_file)
else:
var_groups_ad = None
if y_adapt is None:
yhat, s2 = self.blr.predict(theta, Phi, y, Phis,
var_groups_test=var_groups_te,
var_covariates_test=var_cov_te,
**kwargs)
else:
yhat, s2 = self.blr.predict_and_adjust(theta, Phi_adapt, y_adapt, Phis,
var_groups_test=var_groups_te,
var_groups_adapt=var_groups_ad,
**kwargs)
return yhat, s2<|fim▁end|> | |
<|file_name|>ui_mainwindow.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Wed May 25 13:43:28 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 752)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/podbicon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.statusBarLabel = QtGui.QLabel(self.centralwidget)
self.statusBarLabel.setGeometry(QtCore.QRect(0, 690, 801, 20))
self.statusBarLabel.setFrameShape(QtGui.QFrame.StyledPanel)
self.statusBarLabel.setFrameShadow(QtGui.QFrame.Sunken)
self.statusBarLabel.setText(_fromUtf8(""))
self.statusBarLabel.setObjectName(_fromUtf8("statusBarLabel"))
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 801, 31))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.clearToolButton = QtGui.QToolButton(self.frame)
self.clearToolButton.setGeometry(QtCore.QRect(90, 0, 32, 32))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/clear.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.clearToolButton.setIcon(icon1)
self.clearToolButton.setIconSize(QtCore.QSize(32, 32))
self.clearToolButton.setObjectName(_fromUtf8("clearToolButton"))
<|fim▁hole|> icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.saveToolButton.setIcon(icon2)
self.saveToolButton.setIconSize(QtCore.QSize(32, 32))
self.saveToolButton.setObjectName(_fromUtf8("saveToolButton"))
self.openToolButton = QtGui.QToolButton(self.frame)
self.openToolButton.setGeometry(QtCore.QRect(30, 0, 32, 32))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.openToolButton.setIcon(icon3)
self.openToolButton.setIconSize(QtCore.QSize(32, 32))
self.openToolButton.setObjectName(_fromUtf8("openToolButton"))
self.newToolButton = QtGui.QToolButton(self.frame)
self.newToolButton.setGeometry(QtCore.QRect(0, 0, 32, 32))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.newToolButton.setIcon(icon4)
self.newToolButton.setIconSize(QtCore.QSize(32, 32))
self.newToolButton.setObjectName(_fromUtf8("newToolButton"))
self.printToolButton = QtGui.QToolButton(self.frame)
self.printToolButton.setGeometry(QtCore.QRect(770, 0, 32, 32))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.printToolButton.setIcon(icon5)
self.printToolButton.setIconSize(QtCore.QSize(32, 32))
self.printToolButton.setObjectName(_fromUtf8("printToolButton"))
self.exportToolButton = QtGui.QToolButton(self.frame)
self.exportToolButton.setGeometry(QtCore.QRect(740, 0, 32, 32))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/exportpdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.exportToolButton.setIcon(icon6)
self.exportToolButton.setIconSize(QtCore.QSize(32, 32))
self.exportToolButton.setObjectName(_fromUtf8("exportToolButton"))
self.orderDetailsGroupBox = QtGui.QGroupBox(self.centralwidget)
self.orderDetailsGroupBox.setGeometry(QtCore.QRect(0, 40, 801, 71))
self.orderDetailsGroupBox.setObjectName(_fromUtf8("orderDetailsGroupBox"))
self.layoutWidget = QtGui.QWidget(self.orderDetailsGroupBox)
self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 781, 48))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.orderNumberLabel = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.orderNumberLabel.setFont(font)
self.orderNumberLabel.setText(_fromUtf8(""))
self.orderNumberLabel.setObjectName(_fromUtf8("orderNumberLabel"))
self.gridLayout.addWidget(self.orderNumberLabel, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.layoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1)
self.orderDateEdit = QtGui.QDateEdit(self.layoutWidget)
self.orderDateEdit.setObjectName(_fromUtf8("orderDateEdit"))
self.gridLayout.addWidget(self.orderDateEdit, 0, 3, 1, 1)
self.label_5 = QtGui.QLabel(self.layoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1)
self.paymentTermsComboBox = QtGui.QComboBox(self.layoutWidget)
self.paymentTermsComboBox.setObjectName(_fromUtf8("paymentTermsComboBox"))
self.gridLayout.addWidget(self.paymentTermsComboBox, 0, 5, 1, 1)
self.label_18 = QtGui.QLabel(self.layoutWidget)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout.addWidget(self.label_18, 1, 0, 1, 1)
self.projectComboBox = QtGui.QComboBox(self.layoutWidget)
self.projectComboBox.setObjectName(_fromUtf8("projectComboBox"))
self.gridLayout.addWidget(self.projectComboBox, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.layoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1)
self.orderStatusComboBox = QtGui.QComboBox(self.layoutWidget)
self.orderStatusComboBox.setObjectName(_fromUtf8("orderStatusComboBox"))
self.gridLayout.addWidget(self.orderStatusComboBox, 1, 3, 1, 1)
self.taxRateLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateLabel.setObjectName(_fromUtf8("taxRateLabel"))
self.gridLayout.addWidget(self.taxRateLabel, 1, 4, 1, 1)
self.taxRateValueLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateValueLabel.setText(_fromUtf8(""))
self.taxRateValueLabel.setObjectName(_fromUtf8("taxRateValueLabel"))
self.gridLayout.addWidget(self.taxRateValueLabel, 1, 5, 1, 1)
self.supplierGroupBox = QtGui.QGroupBox(self.centralwidget)
self.supplierGroupBox.setGeometry(QtCore.QRect(0, 120, 801, 80))
self.supplierGroupBox.setObjectName(_fromUtf8("supplierGroupBox"))
self.layoutWidget1 = QtGui.QWidget(self.supplierGroupBox)
self.layoutWidget1.setGeometry(QtCore.QRect(280, 12, 512, 62))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.gridLayout_2 = QtGui.QGridLayout(self.layoutWidget1)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_11 = QtGui.QLabel(self.layoutWidget1)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.layoutWidget1)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_2.addWidget(self.label_8, 0, 2, 1, 1)
self.supplierPhoneLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierPhoneLabel.setText(_fromUtf8(""))
self.supplierPhoneLabel.setObjectName(_fromUtf8("supplierPhoneLabel"))
self.gridLayout_2.addWidget(self.supplierPhoneLabel, 0, 3, 1, 1)
self.label_9 = QtGui.QLabel(self.layoutWidget1)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
self.supplierFaxLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierFaxLabel.setText(_fromUtf8(""))
self.supplierFaxLabel.setObjectName(_fromUtf8("supplierFaxLabel"))
self.gridLayout_2.addWidget(self.supplierFaxLabel, 1, 3, 1, 1)
self.label_7 = QtGui.QLabel(self.layoutWidget1)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_2.addWidget(self.label_7, 2, 0, 1, 1)
self.supplierContactLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierContactLabel.setText(_fromUtf8(""))
self.supplierContactLabel.setObjectName(_fromUtf8("supplierContactLabel"))
self.gridLayout_2.addWidget(self.supplierContactLabel, 2, 1, 1, 1)
self.label_10 = QtGui.QLabel(self.layoutWidget1)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 2, 2, 1, 1)
self.supplierEmailLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierEmailLabel.setText(_fromUtf8(""))
self.supplierEmailLabel.setObjectName(_fromUtf8("supplierEmailLabel"))
self.gridLayout_2.addWidget(self.supplierEmailLabel, 2, 3, 1, 1)
self.supplierAddressLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierAddressLabel.setText(_fromUtf8(""))
self.supplierAddressLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.supplierAddressLabel.setWordWrap(True)
self.supplierAddressLabel.setObjectName(_fromUtf8("supplierAddressLabel"))
self.gridLayout_2.addWidget(self.supplierAddressLabel, 0, 1, 2, 1)
self.gridLayout_2.setColumnMinimumWidth(0, 80)
self.gridLayout_2.setColumnMinimumWidth(1, 166)
self.gridLayout_2.setColumnMinimumWidth(2, 80)
self.gridLayout_2.setColumnMinimumWidth(3, 166)
self.gridLayout_2.setRowMinimumHeight(0, 16)
self.gridLayout_2.setRowMinimumHeight(1, 16)
self.gridLayout_2.setRowMinimumHeight(2, 16)
self.supplierComboBox = QtGui.QComboBox(self.supplierGroupBox)
self.supplierComboBox.setGeometry(QtCore.QRect(11, 18, 256, 20))
self.supplierComboBox.setObjectName(_fromUtf8("supplierComboBox"))
self.productsGroupBox = QtGui.QGroupBox(self.centralwidget)
self.productsGroupBox.setGeometry(QtCore.QRect(0, 210, 801, 331))
self.productsGroupBox.setObjectName(_fromUtf8("productsGroupBox"))
self.productsTableView = QtGui.QTableView(self.productsGroupBox)
self.productsTableView.setGeometry(QtCore.QRect(10, 20, 781, 241))
self.productsTableView.setObjectName(_fromUtf8("productsTableView"))
self.layoutWidget2 = QtGui.QWidget(self.productsGroupBox)
self.layoutWidget2.setGeometry(QtCore.QRect(590, 270, 201, 53))
self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
self.gridLayout_3 = QtGui.QGridLayout(self.layoutWidget2)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.totalExcludingTaxLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalExcludingTaxLabel.setFont(font)
self.totalExcludingTaxLabel.setObjectName(_fromUtf8("totalExcludingTaxLabel"))
self.gridLayout_3.addWidget(self.totalExcludingTaxLabel, 0, 0, 1, 1)
self.totalExcludingTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalExcludingTaxResultLabel.setFont(font)
self.totalExcludingTaxResultLabel.setText(_fromUtf8(""))
self.totalExcludingTaxResultLabel.setObjectName(_fromUtf8("totalExcludingTaxResultLabel"))
self.gridLayout_3.addWidget(self.totalExcludingTaxResultLabel, 0, 1, 1, 1)
self.totalTaxLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalTaxLabel.setFont(font)
self.totalTaxLabel.setObjectName(_fromUtf8("totalTaxLabel"))
self.gridLayout_3.addWidget(self.totalTaxLabel, 1, 0, 1, 1)
self.totalTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalTaxResultLabel.setFont(font)
self.totalTaxResultLabel.setText(_fromUtf8(""))
self.totalTaxResultLabel.setObjectName(_fromUtf8("totalTaxResultLabel"))
self.gridLayout_3.addWidget(self.totalTaxResultLabel, 1, 1, 1, 1)
self.totalLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalLabel.setFont(font)
self.totalLabel.setObjectName(_fromUtf8("totalLabel"))
self.gridLayout_3.addWidget(self.totalLabel, 2, 0, 1, 1)
self.totalResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalResultLabel.setFont(font)
self.totalResultLabel.setText(_fromUtf8(""))
self.totalResultLabel.setObjectName(_fromUtf8("totalResultLabel"))
self.gridLayout_3.addWidget(self.totalResultLabel, 2, 1, 1, 1)
self.deliveryGroupBox = QtGui.QGroupBox(self.centralwidget)
self.deliveryGroupBox.setGeometry(QtCore.QRect(0, 550, 801, 131))
self.deliveryGroupBox.setObjectName(_fromUtf8("deliveryGroupBox"))
self.layoutWidget3 = QtGui.QWidget(self.deliveryGroupBox)
self.layoutWidget3.setGeometry(QtCore.QRect(10, 20, 781, 99))
self.layoutWidget3.setObjectName(_fromUtf8("layoutWidget3"))
self.gridLayout_4 = QtGui.QGridLayout(self.layoutWidget3)
self.gridLayout_4.setMargin(0)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_16 = QtGui.QLabel(self.layoutWidget3)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_4.addWidget(self.label_16, 0, 3, 1, 1)
self.label_14 = QtGui.QLabel(self.layoutWidget3)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_4.addWidget(self.label_14, 0, 1, 1, 1)
self.gpsCoordinatesLineEdit = QtGui.QLineEdit(self.layoutWidget3)
self.gpsCoordinatesLineEdit.setObjectName(_fromUtf8("gpsCoordinatesLineEdit"))
self.gridLayout_4.addWidget(self.gpsCoordinatesLineEdit, 3, 2, 1, 1)
self.notesPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
self.notesPlainTextEdit.setPlainText(_fromUtf8(""))
self.notesPlainTextEdit.setObjectName(_fromUtf8("notesPlainTextEdit"))
self.gridLayout_4.addWidget(self.notesPlainTextEdit, 0, 4, 4, 1)
self.deliveryAddressPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
self.deliveryAddressPlainTextEdit.setObjectName(_fromUtf8("deliveryAddressPlainTextEdit"))
self.gridLayout_4.addWidget(self.deliveryAddressPlainTextEdit, 0, 2, 3, 1)
self.label_17 = QtGui.QLabel(self.layoutWidget3)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_4.addWidget(self.label_17, 3, 1, 1, 1)
self.label_15 = QtGui.QLabel(self.layoutWidget3)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout_4.addWidget(self.label_15, 0, 0, 1, 1)
self.deliveryDateEdit = QtGui.QDateEdit(self.layoutWidget3)
self.deliveryDateEdit.setObjectName(_fromUtf8("deliveryDateEdit"))
self.gridLayout_4.addWidget(self.deliveryDateEdit, 1, 0, 1, 1)
self.gridLayout_4.setColumnMinimumWidth(0, 125)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuView = QtGui.QMenu(self.menubar)
self.menuView.setObjectName(_fromUtf8("menuView"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
self.menuEdit = QtGui.QMenu(self.menubar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionNewPurchaseOrder = QtGui.QAction(MainWindow)
self.actionNewPurchaseOrder.setObjectName(_fromUtf8("actionNewPurchaseOrder"))
self.actionView_Purchase_Order = QtGui.QAction(MainWindow)
self.actionView_Purchase_Order.setObjectName(_fromUtf8("actionView_Purchase_Order"))
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionExit_2 = QtGui.QAction(MainWindow)
self.actionExit_2.setObjectName(_fromUtf8("actionExit_2"))
self.actionPurchase_Order = QtGui.QAction(MainWindow)
self.actionPurchase_Order.setObjectName(_fromUtf8("actionPurchase_Order"))
self.actionViewReports = QtGui.QAction(MainWindow)
self.actionViewReports.setObjectName(_fromUtf8("actionViewReports"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionOpenPurchaseOrder = QtGui.QAction(MainWindow)
self.actionOpenPurchaseOrder.setObjectName(_fromUtf8("actionOpenPurchaseOrder"))
self.actionCopyPurchaseOrder = QtGui.QAction(MainWindow)
self.actionCopyPurchaseOrder.setObjectName(_fromUtf8("actionCopyPurchaseOrder"))
self.actionClearPurchaseOrder = QtGui.QAction(MainWindow)
self.actionClearPurchaseOrder.setObjectName(_fromUtf8("actionClearPurchaseOrder"))
self.actionPrintPurchaseOrder = QtGui.QAction(MainWindow)
self.actionPrintPurchaseOrder.setObjectName(_fromUtf8("actionPrintPurchaseOrder"))
self.actionEditProjects = QtGui.QAction(MainWindow)
self.actionEditProjects.setObjectName(_fromUtf8("actionEditProjects"))
self.actionEditSuppliers = QtGui.QAction(MainWindow)
self.actionEditSuppliers.setObjectName(_fromUtf8("actionEditSuppliers"))
self.actionEditProducts = QtGui.QAction(MainWindow)
self.actionEditProducts.setObjectName(_fromUtf8("actionEditProducts"))
self.actionSavePurchaseOrder = QtGui.QAction(MainWindow)
self.actionSavePurchaseOrder.setObjectName(_fromUtf8("actionSavePurchaseOrder"))
self.actionExportPurchaseOrder = QtGui.QAction(MainWindow)
self.actionExportPurchaseOrder.setObjectName(_fromUtf8("actionExportPurchaseOrder"))
self.actionEditConfiguration = QtGui.QAction(MainWindow)
self.actionEditConfiguration.setObjectName(_fromUtf8("actionEditConfiguration"))
self.menuFile.addAction(self.actionNewPurchaseOrder)
self.menuFile.addAction(self.actionOpenPurchaseOrder)
self.menuFile.addAction(self.actionSavePurchaseOrder)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExportPurchaseOrder)
self.menuFile.addAction(self.actionPrintPurchaseOrder)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit_2)
self.menuView.addAction(self.actionViewReports)
self.menuHelp.addAction(self.actionAbout)
self.menuEdit.addAction(self.actionClearPurchaseOrder)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionEditProjects)
self.menuEdit.addAction(self.actionEditSuppliers)
self.menuEdit.addAction(self.actionEditProducts)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionEditConfiguration)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.label_3.setBuddy(self.orderDateEdit)
self.label_5.setBuddy(self.paymentTermsComboBox)
self.label_18.setBuddy(self.orderStatusComboBox)
self.label_4.setBuddy(self.orderStatusComboBox)
self.label_16.setBuddy(self.notesPlainTextEdit)
self.label_14.setBuddy(self.deliveryAddressPlainTextEdit)
self.label_17.setBuddy(self.gpsCoordinatesLineEdit)
self.label_15.setBuddy(self.deliveryDateEdit)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.newToolButton, self.projectComboBox)
MainWindow.setTabOrder(self.projectComboBox, self.orderDateEdit)
MainWindow.setTabOrder(self.orderDateEdit, self.orderStatusComboBox)
MainWindow.setTabOrder(self.orderStatusComboBox, self.paymentTermsComboBox)
MainWindow.setTabOrder(self.paymentTermsComboBox, self.supplierComboBox)
MainWindow.setTabOrder(self.supplierComboBox, self.productsTableView)
MainWindow.setTabOrder(self.productsTableView, self.deliveryDateEdit)
MainWindow.setTabOrder(self.deliveryDateEdit, self.deliveryAddressPlainTextEdit)
MainWindow.setTabOrder(self.deliveryAddressPlainTextEdit, self.gpsCoordinatesLineEdit)
MainWindow.setTabOrder(self.gpsCoordinatesLineEdit, self.notesPlainTextEdit)
MainWindow.setTabOrder(self.notesPlainTextEdit, self.saveToolButton)
MainWindow.setTabOrder(self.saveToolButton, self.printToolButton)
MainWindow.setTabOrder(self.printToolButton, self.openToolButton)
MainWindow.setTabOrder(self.openToolButton, self.clearToolButton)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.clearToolButton.setToolTip(_translate("MainWindow", "Clear data", None))
self.clearToolButton.setText(_translate("MainWindow", "...", None))
self.saveToolButton.setToolTip(_translate("MainWindow", "Save purchase order", None))
self.saveToolButton.setText(_translate("MainWindow", "...", None))
self.openToolButton.setToolTip(_translate("MainWindow", "Open an existing purchase order", None))
self.openToolButton.setText(_translate("MainWindow", "...", None))
self.newToolButton.setToolTip(_translate("MainWindow", "Create a new purchase order", None))
self.newToolButton.setText(_translate("MainWindow", "...", None))
self.printToolButton.setToolTip(_translate("MainWindow", "Print purchase order", None))
self.printToolButton.setText(_translate("MainWindow", "...", None))
self.exportToolButton.setToolTip(_translate("MainWindow", "Export purchase order to PDF file", None))
self.exportToolButton.setText(_translate("MainWindow", "...", None))
self.orderDetailsGroupBox.setTitle(_translate("MainWindow", "Order Details", None))
self.label_2.setText(_translate("MainWindow", "Order Number:", None))
self.label_3.setText(_translate("MainWindow", "Order Date:", None))
self.label_5.setText(_translate("MainWindow", "Payment Terms:", None))
self.label_18.setText(_translate("MainWindow", "Project:", None))
self.label_4.setText(_translate("MainWindow", "Order Status:", None))
self.taxRateLabel.setText(_translate("MainWindow", "Tax Rate:", None))
self.supplierGroupBox.setTitle(_translate("MainWindow", "Supplier", None))
self.label_11.setText(_translate("MainWindow", "Address:", None))
self.label_8.setText(_translate("MainWindow", "Phone Number:", None))
self.label_9.setText(_translate("MainWindow", "Fax Number:", None))
self.label_7.setText(_translate("MainWindow", "Contact Person:", None))
self.label_10.setText(_translate("MainWindow", "Email:", None))
self.productsGroupBox.setTitle(_translate("MainWindow", "Products", None))
self.totalExcludingTaxLabel.setText(_translate("MainWindow", "Total Excluding Tax:", None))
self.totalTaxLabel.setText(_translate("MainWindow", "Total Tax:", None))
self.totalLabel.setText(_translate("MainWindow", "Total:", None))
self.deliveryGroupBox.setTitle(_translate("MainWindow", "Delivery", None))
self.label_16.setText(_translate("MainWindow", "Notes:", None))
self.label_14.setText(_translate("MainWindow", "Delivery Address:", None))
self.label_17.setText(_translate("MainWindow", "GPS Coordinates:", None))
self.label_15.setText(_translate("MainWindow", "Delivery Date:", None))
self.menuFile.setTitle(_translate("MainWindow", "&File", None))
self.menuView.setTitle(_translate("MainWindow", "&View", None))
self.menuHelp.setTitle(_translate("MainWindow", "&Help", None))
self.menuEdit.setTitle(_translate("MainWindow", "&Edit", None))
self.actionNewPurchaseOrder.setText(_translate("MainWindow", "Create &New Purchase Order", None))
self.actionView_Purchase_Order.setText(_translate("MainWindow", "View Purchase Order...", None))
self.actionExit.setText(_translate("MainWindow", "Exit", None))
self.actionExit_2.setText(_translate("MainWindow", "E&xit", None))
self.actionPurchase_Order.setText(_translate("MainWindow", "Purchase Order...", None))
self.actionViewReports.setText(_translate("MainWindow", "View &Reports...", None))
self.actionAbout.setText(_translate("MainWindow", "&About", None))
self.actionOpenPurchaseOrder.setText(_translate("MainWindow", "&Open Purchase Order...", None))
self.actionCopyPurchaseOrder.setText(_translate("MainWindow", "&Copy Purchase Order", None))
self.actionClearPurchaseOrder.setText(_translate("MainWindow", "C&lear Purchase Order", None))
self.actionPrintPurchaseOrder.setText(_translate("MainWindow", "&Print Purchase Order...", None))
self.actionEditProjects.setText(_translate("MainWindow", "Edit Projects...", None))
self.actionEditSuppliers.setText(_translate("MainWindow", "Edit Suppliers...", None))
self.actionEditProducts.setText(_translate("MainWindow", "Edit Products...", None))
self.actionSavePurchaseOrder.setText(_translate("MainWindow", "Save Purchase Order", None))
self.actionExportPurchaseOrder.setText(_translate("MainWindow", "Export Purchase Order...", None))
self.actionEditConfiguration.setText(_translate("MainWindow", "Configuration Wizard...", None))
import resources_rc<|fim▁end|> | self.saveToolButton = QtGui.QToolButton(self.frame)
self.saveToolButton.setGeometry(QtCore.QRect(60, 0, 32, 32))
icon2 = QtGui.QIcon()
|
<|file_name|>enum_type_wrapper.py<|end_file_name|><|fim▁begin|># Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple wrapper around enum types to expose utility functions.
Instances are created as properties with the same name as the enum they wrap
on proto classes. For usage, see:
reflection_test.py
"""
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
class EnumTypeWrapper(object):
"""A utility for finding the names of enum values."""
DESCRIPTOR = None
def __init__(self, enum_type):
"""Inits EnumTypeWrapper with an EnumDescriptor."""
self._enum_type = enum_type
    self.DESCRIPTOR = enum_type
def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
    raise ValueError('Enum %s has no name defined for value %d' % (<|fim▁hole|> """Returns the value corresponding to the given enum name."""
if name in self._enum_type.values_by_name:
return self._enum_type.values_by_name[name].number
raise ValueError('Enum %s has no value defined for name %s' % (
self._enum_type.name, name))
def keys(self):
"""Return a list of the string names in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.name
for value_descriptor in self._enum_type.values]
def values(self):
"""Return a list of the integer values in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.number
for value_descriptor in self._enum_type.values]
def items(self):
"""Return a list of the (name, value) pairs of the enum.
These are returned in the order they were defined in the .proto file.
"""
return [(value_descriptor.name, value_descriptor.number)
for value_descriptor in self._enum_type.values]<|fim▁end|> | self._enum_type.name, number))
def Value(self, name): |
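# --- Hedged usage sketch (editor addition, not part of the original module). ---
# EnumTypeWrapper only needs an object exposing `name`, `values`,
# `values_by_number`, and `values_by_name`; real code receives an
# EnumDescriptor from generated proto classes, so the stand-in below is
# illustrative only.
if __name__ == '__main__':
  class _Value(object):
    def __init__(self, name, number):
      self.name = name
      self.number = number

  class _Enum(object):
    pass

  _enum = _Enum()
  _enum.name = 'Color'
  _enum.values = [_Value('RED', 1), _Value('GREEN', 2)]
  _enum.values_by_number = dict((v.number, v) for v in _enum.values)
  _enum.values_by_name = dict((v.name, v) for v in _enum.values)

  color = EnumTypeWrapper(_enum)
  assert color.Name(1) == 'RED'
  assert color.Value('GREEN') == 2
  assert color.items() == [('RED', 1), ('GREEN', 2)]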
<|file_name|>MobileFactory.java<|end_file_name|><|fim▁begin|>package demo.designpatterns.simplefactory;
/**
* Created by Roger Xu on 2017/6/24.
*/<|fim▁hole|> if (title.equalsIgnoreCase("nokia")) {
return new Nokia();
} else if (title.equalsIgnoreCase("motorola")) {
return new Motorola();
} else {
throw new Exception("no such " + title + " mobile found");
}
}
}<|fim▁end|> | public class MobileFactory {
public Mobile getMobile(String title) throws Exception { |
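// --- Hedged usage sketch (editor addition): Nokia and Motorola are assumed to
// be Mobile implementations defined elsewhere in this package. ---
class MobileFactoryDemo {
    public static void main(String[] args) throws Exception {
        MobileFactory factory = new MobileFactory();
        Mobile phone = factory.getMobile("Nokia"); // lookup is case-insensitive
        System.out.println(phone);
        // factory.getMobile("htc") would throw: "no such htc mobile found"
    }
}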
<|file_name|>map_unlock_codec.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from hazelcast.protocol.builtin import DataCodec
# hex: 0x011300
_REQUEST_MESSAGE_TYPE = 70400
# hex: 0x011301
_RESPONSE_MESSAGE_TYPE = 70401
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)<|fim▁end|> | from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec |
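# Editor note (hedged): encode_request above lays the message out as a single
# fixed-size initial frame -- the request header followed by thread_id and
# reference_id as two 8-byte longs (offsets derived from REQUEST_HEADER_SIZE
# and LONG_SIZE_IN_BYTES) -- then a string frame for the map name and a data
# frame for the already-serialized key.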
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>declare module "fscreen" {
namespace fscreen {
export const fullscreenEnabled: boolean;
export const fullscreenElement: Element | null;
export const requestFullscreen: (elem: Element) => void;
export const requestFullscreenFunction: (elem: Element) => () => void;
export const exitFullscreen: () => void;
export let onfullscreenchange: Function;
export const addEventListener: (
event: "fullscreenchange" | "fullscreenerror",
handler: () => void,
options?: any,
) => void;
export const removeEventListener: (event: "fullscreenchange" | "fullscreenerror", handler: () => void) => void;
export let onfullscreenerror: Function;
}
<|fim▁hole|><|fim▁end|> | export default fscreen;
} |
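// --- Hedged usage sketch (editor addition): exercising the declared API. ---
import fscreen from "fscreen";

function toggleFullscreen(el: Element): void {
  if (!fscreen.fullscreenEnabled) return; // no fullscreen API available
  if (fscreen.fullscreenElement === null) {
    fscreen.requestFullscreen(el);
  } else {
    fscreen.exitFullscreen();
  }
}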
<|file_name|>jsonutil.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import sys
def diff(a,b):
return compareTree(a, b)
def getType(a):
if isinstance(a, dict):
return 'object'
elif isinstance(a, list):
return 'array'
elif isinstance(a, str):
return 'string'
    elif isinstance(a, bool):
        return 'boolean'
    elif isinstance(a, int):
        return 'number'
return 'null'
def compareTree(a, b):
a_type = getType(a)
b_type = getType(b)
if a_type == b_type:
if a_type == 'object':
return compareTree_dict(a, b)
elif a_type == 'array':
return compareTree_list(a, b)
else:
if a == b:
return a
else:
return {'+':a,'-':b}
else:
return {'+':a,'-':b}
def compareTree_dict(a, b):
diff = {'_sys_diff':{}}
for key in a:
diff[key] = None
for key in b:
diff[key] = None
for key in diff:
if key in a and key in b:
diff[key] = compareTree(a[key], b[key])
elif key in a:
diff['_sys_diff'][key] = '+'
diff[key] = a[key]
elif key in b:
diff['_sys_diff'][key] = '-'
diff[key] = b[key]
#else:
#print 'error ' + key
return diff
def compareTree_list(a, b):
diff = []
for i in a:
diff.append(i)
for i in b:
if i in diff:
pass
else:
diff.append(i)
return diff
def merge_part(a, b):
if isinstance(a, str):
return a
if isinstance(a, int):
return a
if isinstance(a, bool):
return a
if isinstance(a, dict):
pass
if isinstance(a, list):
return a
result = {}
keys = {}
for key in a:
keys[key] = None
for key in b:
keys[key] = None
for key in keys:
if key == '_sys_diff':
continue<|fim▁hole|> state = 0
if a.has_key('_sys_diff') and key in a['_sys_diff']:
if a['_sys_diff'][key] == '+':
state = 0
elif a['_sys_diff'][key] == '-':
state = 1
else:
state = 2
if b.has_key('_sys_diff') and key in b['_sys_diff']:
if b['_sys_diff'][key] == '+':
state += 3 * 0
elif b['_sys_diff'][key] == '-':
state += 3 * 1
else:
state += 3 * 2
if state == 0:
#
#a=+ b=+
result[key] = merge_part(a[key], b[key])
elif state == 1:
#- +
pass
elif state == 2:
#none +
result[key] = b[key]
elif state == 3:
#+ -
pass
elif state == 4:
#- -
pass
elif state == 5:
#none -
pass
elif state == 6:
#+ none
result[key] = a[key]
elif state == 7:
#- none
pass
elif state == 8:
#none none
if a.has_key(key) and b.has_key(key):
result[key] = merge_part(a[key], b[key])
elif a.has_key(key):
result[key] = a[key]
elif b.has_key(key):
result[key] = b[key]
elif state == 9:
pass
return result
def clean_sys_diff(a):
if isinstance(a, dict):
if a.has_key('_sys_diff'):
del a['_sys_diff']
for key in a:
clean_sys_diff(a[str(key)])
def merge(src, dest, base):
src_diff = diff(src, base)
dest_diff = diff(dest, base)
merged_model = merge_part(src_diff, dest_diff)
clean_sys_diff(merged_model)
return merged_model<|fim▁end|> | #if key[0:4] == '_sys':
# continue |
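# --- Hedged usage sketch (editor addition): a three-way merge where one side
# removes a key and the other adds one. Scalar modification conflicts are not
# fully handled by merge_part, so the demo sticks to additions and removals.
if __name__ == '__main__':
    base = {'a': 1, 'b': 2}
    src = {'a': 1}                    # removed 'b'
    dest = {'a': 1, 'b': 2, 'c': 3}   # added 'c'
    print merge(src, dest, base)      # -> {'a': 1, 'c': 3}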
<|file_name|>dubstep.py<|end_file_name|><|fim▁begin|>#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Dubstep
#Problem level: 6 kyu
<|fim▁hole|><|fim▁end|> | def song_decoder(song):
return " ".join(" ".join(song.split('WUB')).split()) |
<|file_name|>UserData.ts<|end_file_name|><|fim▁begin|>export class UserData {
givenName: string;
familyName: string;<|fim▁hole|> email: string;
}<|fim▁end|> | name: string; |
<|file_name|>text_io_xml.py<|end_file_name|><|fim▁begin|>bl_info = {
"name" : "text objects to-from xml",
"author" : "chebhou",
"version" : (1, 0),
"blender" : (2, 7, 3),
"location" : "file->export->text to-from xml",
"discription" : "copys an text objectx from-to xml file",
"wiki_url" : " https://github.com/chebhou",
"tracker_url" : "https://github.com/chebhou",
"category" : "Import-Export"
}
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
from bpy.props import EnumProperty, BoolProperty
from xml.dom import minidom
from xml.dom.minidom import Document
def txt_sync(filepath):
dom = minidom.parse(filepath)
    scenes = dom.getElementsByTagName('scene')
    for scene in scenes:
        scene_name = scene.getAttribute('name')
        print("\n", scene_name)
        bl_scene = bpy.data.scenes[scene_name]
        txt_objs = scene.getElementsByTagName('object')
for obj in txt_objs:
obj_name = obj.getAttribute('name')
obj_body = obj.childNodes[0].nodeValue
            bl_scene.objects[obj_name].data.body = obj_body
print(obj_name," ",obj_body)
def txt_export(filepath):
doc = Document()
root = doc.createElement('data')
doc.appendChild(root)<|fim▁hole|> #create a scene
scene = doc.createElement('scene')
scene.setAttribute('name', sce.name)
root.appendChild(scene)
for obj in sce.objects :
if obj.type == 'FONT':
#add object element
object = doc.createElement('object')
object.setAttribute('name', obj.name)
txt_node = doc.createTextNode(obj.data.body)
object.appendChild(txt_node)
scene.appendChild(object)
#write to a file
file_handle = open(filepath,"wb")
file_handle.write(bytes(doc.toprettyxml(indent='\t'), 'UTF-8'))
file_handle.close()
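# Editor note (hedged): with toprettyxml(indent='\t') above, the exported file
# looks roughly like this (scene and object names depend on the .blend file):
#
# <?xml version="1.0" ?>
# <data>
# 	<scene name="Scene">
# 		<object name="Text">Hello World</object>
# 	</scene>
# </data>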
class text_export(Operator, ExportHelper):
"""write and read text objects to a file"""
bl_idname = "export_scene.text_xml"
bl_label = "text from-to xml"
bl_options = {'REGISTER', 'UNDO'} #should remove undo ?
# ExportHelper mixin class uses this
filename_ext = ".xml"
#parameters and variables
convert = EnumProperty(
name="Convert",
description="Choose conversion",
items=(('W', "write objects", "write text objects to xml"),
('R', "read objects", "read text objects from xml")),
default='W',
)
#main function
def execute(self, context):
bpy.ops.object.mode_set(mode = 'OBJECT')
if self.convert == 'W':
txt_export(self.filepath)
else:
txt_sync(self.filepath)
bpy.context.scene.update()
self.report({'INFO'},"Conversion is Done")
return {'FINISHED'}
def menu_func_export(self, context):
self.layout.operator(text_export.bl_idname, text="Text to-from xml")
def register():
bpy.utils.register_class(text_export)
bpy.types.INFO_MT_file_export.append(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(text_export)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
    bpy.types.INFO_MT_file_import.remove(menu_func_export)
if __name__ == "__main__":
register()<|fim▁end|> |
for sce in bpy.data.scenes : |
<|file_name|>solver041.rs<|end_file_name|><|fim▁begin|>// COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// Rust solvers for Project Euler problems
use euler::algorithm::combinatorics::permutations_with;
use euler::algorithm::long::{from_digits_index, is_even};
use euler::algorithm::prime::miller_rabin;
use euler::Solver;
// We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once.
// For example, 2143 is a 4-digit pandigital and is also prime.
// What is the largest n-digit pandigital prime that exists?
pub struct Solver041 {<|fim▁hole|>impl Default for Solver041 {
fn default() -> Self {
Solver041 { n: 9 }
}
}
impl Solver for Solver041 {
fn solve(&self) -> isize {
        // Assume the largest prime also starts with the biggest digit
let predicate = |d: &[_]| if *d.last().unwrap() != d.len() as _ || is_even(*d.first().unwrap()) { None } else {
let candidate = from_digits_index(d, 0, d.len());
if miller_rabin(candidate) { Some(candidate) } else { None }
};
        // If the sum of the digits of a permutation is a multiple of three, then all of its permutations are multiples of three as well
(1..=self.n).rev().filter(|&n| n % 3 != 0).find_map(|n| permutations_with(1, n, predicate).max()).unwrap()
}
}<|fim▁end|> | pub n: isize,
}
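// Editor note (hedged): the digit-sum argument above rules out n = 9 (digit
// sum 45) and n = 8 (digit sum 36) entirely, so the search effectively starts
// at n = 7 (digit sum 28), where the well-known answer 7652413 is found.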
|
<|file_name|>decorators.py<|end_file_name|><|fim▁begin|>from pypermissions.permission import PermissionSet
def _prepare_runtime_permission(self, perm=None, runkw=None, args=None, kwargs=None):
"""This function parses the provided string arguments to decorators into the actual values for use when the
decorator is being evaluated. This allows for permissions to be created that rely on arguments that are provided to
the function.
:param perm: The permission string to parse
:param runkw: The run-time components to be inserted into the permission
:param args: The arguments provided to the decorated function
:param kwargs: The keyword arguments provided to the decorated function
:rtype: :py:class:`str`
"""
permission = perm
if not permission:
return False
for key, value in runkw.iteritems():
val_split = value.split('.')
for attr in val_split:
if attr == "self":
value = self
continue
elif attr in kwargs:
value = kwargs.get(attr)
continue
value = getattr(value, attr)
permission = permission.replace('{'+key+'}', value)
return permission
def set_has_permission(perm=None, perm_set=None, on_failure=None, perm_check=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
    to rely on runtime information via runkw, which can be used to modify perm based on arguments provided to the decorated
    function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
    current user making the function call and provide their permissions as the perm_set. The function provided for
    use when the check fails will be called with the decorated function's arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param perm_check: The PermissionSet function to be used when evaluating for perm.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
def decorator(function):
def check_permission(self, *args, **kwargs):
permission = _prepare_runtime_permission(self, perm, runkw, args, kwargs)
# No permission provided, so everyone has permission.
if not permission:
return function(self, *args, **kwargs)
if not perm_set:
return on_failure(self, *args, **kwargs)
<|fim▁hole|> return function(self, *args, **kwargs)
return check_permission
return decorator
def set_grants_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has the permission specified. It allows for the permission
    to rely on runtime information via runkw, which can be used to modify perm based on arguments provided to the decorated
    function. For many use cases, this can be extended by decorating it with a custom decorator that will capture the
    current user making the function call and provide their permissions as the perm_set. The function provided for
    use when the check fails will be called with the decorated function's arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.grants_permission, **runkw)
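# --- Hedged usage sketch (editor addition): DocumentService, `user.name`, and
# the permission string below are illustrative names, not part of the library.
#
# class DocumentService(object):
#     def __init__(self, user):
#         self.user = user  # user.name is read at call time via runkw
#
#     @set_grants_permission(perm="documents.{owner}.edit",
#                            perm_set=None,  # supply the caller's PermissionSet
#                            on_failure=lambda self, *a, **kw: False,
#                            owner="self.user.name")
#     def edit(self, document):
#         return True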
def set_has_any_permission(perm=None, perm_set=None, on_failure=None, **runkw):
"""This decorator checks if the provided permission set has a permission of the form specified. It allows for the
    permission to rely on runtime information via runkw, which can be used to modify perm based on arguments provided to the
    decorated function. For many use cases, this can be extended by decorating it with a custom decorator that will
    capture the current user making the function call and provide their permissions as the perm_set. The function
    provided for use when the check fails will be called with the decorated function's arguments.
:param perm: The permission to be checked. May contain {} tags to be replaced at run time.
:param perm_set: The permission set being checked for the permission.
:param on_failure: A function that gets called instead of the decorated function when perm_set does not have the
specified permission.
:param runkw: The mappings to be used to create the actual permission at run time.
"""
return set_has_permission(perm, perm_set, on_failure, perm_check=PermissionSet.has_any_permission, **runkw)<|fim▁end|> | if not perm_check(perm_set, permission):
return on_failure(self, *args, **kwargs)
|
<|file_name|>server.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess<|fim▁hole|><|fim▁end|> |
running = os.system("nc -u -l -p 5001 | mplayer -cache 1024 -")
#subprocess.check_call('/opt/vc/bin/raspivid -n -w 800 -h 600 -fps 24 -t 0 -o - | socat - udp-sendto:' + '129.16.194.248' + ':5001') |
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 Google LLC
//
// Use of this source code is governed by an MIT-style license that can be found
// in the LICENSE file or at https://opensource.org/licenses/MIT.
use std::fmt::{Debug, Display, Formatter};
use regex::Error as RegexError;
/// An error type for failures that can occur during a session.
#[derive(Debug)]
pub enum Error {
/// Action-specific failure.
Action(Box<dyn std::error::Error>),
/// Attempted to call an unknown or not implemented action.
Dispatch(String),
/// An error occurred when encoding bytes of a proto message.
Encode(prost::EncodeError),
/// An error occurred when parsing a proto message.
Parse(ParseError),
}
impl Error {
/// Converts an arbitrary action-issued error to a session error.
///
/// This function should be used to construct session errors from action
/// specific error types and propagate them further in the session pipeline.
pub fn action<E>(error: E) -> Error
where
E: std::error::Error + 'static
{
Error::Action(Box::new(error))
}
}
impl Display for Error {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
use Error::*;
match *self {
Action(ref error) => {
write!(fmt, "action error: {}", error)
}
Dispatch(ref name) if name.is_empty() => {
write!(fmt, "missing action")
}
Dispatch(ref name) => {
write!(fmt, "unknown action: {}", name)
}
Encode(ref error) => {
write!(fmt, "failure during encoding proto message: {}", error)
}
Parse(ref error) => {
write!(fmt, "malformed proto message: {}", error)
}
}
}
}
impl std::error::Error for Error {
<|fim▁hole|> fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
use Error::*;
match *self {
Action(ref error) => Some(error.as_ref()),
Dispatch(_) => None,
Encode(ref error) => Some(error),
Parse(ref error) => Some(error),
}
}
}
impl From<prost::EncodeError> for Error {
fn from(error: prost::EncodeError) -> Error {
Error::Encode(error)
}
}
impl From<ParseError> for Error {
fn from(error: ParseError) -> Error {
Error::Parse(error)
}
}
/// An error type for failures that can occur when parsing proto messages.
#[derive(Debug)]
pub enum ParseError {
/// An error occurred because the decoded proto message was malformed.
Malformed(Box<dyn std::error::Error + Send + Sync>),
/// An error occurred when decoding bytes of a proto message.
Decode(prost::DecodeError),
}
impl ParseError {
/// Converts a detailed error indicating a malformed proto to `ParseError`.
///
/// This is just a convenience function for lifting custom error types that
/// contain more specific information to generic `ParseError`.
pub fn malformed<E>(error: E) -> ParseError
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
ParseError::Malformed(error.into())
}
}
impl Display for ParseError {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
use ParseError::*;
match *self {
Malformed(ref error) => {
write!(fmt, "invalid proto message: {}", error)
}
Decode(ref error) => {
write!(fmt, "failed to decode proto message: {}", error)
}
}
}
}
impl std::error::Error for ParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
use ParseError::*;
match *self {
Malformed(ref error) => Some(error.as_ref()),
Decode(ref error) => Some(error),
}
}
}
impl From<prost::DecodeError> for ParseError {
fn from(error: prost::DecodeError) -> ParseError {
ParseError::Decode(error)
}
}
/// An error type for situations where required proto field is missing.
#[derive(Debug)]
pub struct MissingFieldError {
/// A name of the missing field.
name: &'static str,
}
impl MissingFieldError {
/// Creates a new error indicating that required field `name` is missing.
pub fn new(name: &'static str) -> MissingFieldError {
MissingFieldError {
name: name,
}
}
}
impl Display for MissingFieldError {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "required field '{}' is missing", self.name)
}
}
impl std::error::Error for MissingFieldError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
impl From<MissingFieldError> for ParseError {
fn from(error: MissingFieldError) -> ParseError {
ParseError::malformed(error)
}
}
/// An error type for situations where a given proto value is not supported.
#[derive(Debug)]
pub struct UnsupportedValueError<T> {
/// A name of the field the value belongs to.
pub name: &'static str,
/// A value that is not supported.
pub value: T,
}
impl<T: Debug> Display for UnsupportedValueError<T> {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "unsupported value for '{}': {:?}", self.name, self.value)
}
}
impl<T: Debug> std::error::Error for UnsupportedValueError<T> {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
/// An error type for situations where time micros cannot be converted
/// to `std::time::SystemTime`.
#[derive(Debug)]
pub struct TimeMicrosConversionError {
/// Time micros value causing the conversion error.
pub micros: u64,
}
impl Display for TimeMicrosConversionError {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "cannot convert micros to std::time::SystemTime: {}", self.micros)
}
}
impl std::error::Error for TimeMicrosConversionError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
impl From<TimeMicrosConversionError> for ParseError {
fn from(error: TimeMicrosConversionError) -> ParseError {
ParseError::malformed(error)
}
}
#[derive(Debug)]
pub struct RegexParseError {
/// Raw data of the string which could not be converted to Regex.
pub raw_data: Vec<u8>,
/// Error message caught during the conversion.
pub error: RegexError,
}
impl Display for RegexParseError {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Regex parse error happened on parsing '{:?}'. \
Regex error: '{}'",
self.raw_data,
self.error.to_string())
}
}
impl std::error::Error for RegexParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
impl From<RegexParseError> for ParseError {
fn from(error: RegexParseError) -> ParseError {
ParseError::malformed(error)
}
}
/// An error type for situations where proto enum has a value for which
/// the definition is not known.
#[derive(Debug)]
pub struct UnknownEnumValueError {
/// A name of the enum field having unknown enum value.
pub name: &'static str,
/// An enum value, which definition is not known.
pub value: i32,
}
impl Display for UnknownEnumValueError {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "protobuf enum '{}' has unrecognised value: '{}'",
self.name, self.value)
}
}
impl std::error::Error for UnknownEnumValueError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
impl From<UnknownEnumValueError> for ParseError {
fn from(error: UnknownEnumValueError) -> ParseError {
ParseError::malformed(error)
}
}<|fim▁end|> | |
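// Editor note (hedged): a typical call site lifts an action-specific failure
// into the session error type, e.g.
//
//     do_collect(&request).map_err(Error::action)?;
//
// where `do_collect` is some action returning `Result<_, SomeActionError>`
// and `SomeActionError` implements `std::error::Error`.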
<|file_name|>0025_auto_20170109_2017.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-09 19:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productdb', '0024_auto_20161227_1015'),
]
operations = [
migrations.AlterField(
model_name='productcheck',
name='input_product_ids',
field=models.CharField(help_text='unordered Product IDs, separated by line breaks or semicolon', max_length=65536, verbose_name='Product ID list'),
),<|fim▁hole|> ]<|fim▁end|> | |
<|file_name|>0003_auto_20170313_0117.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-13 01:17
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('dash', '0002_remove_post_origin'),
]<|fim▁hole|> migrations.AlterField(
model_name='comment',
name='id',
field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
),
]<|fim▁end|> |
operations = [ |
<|file_name|>DefaultMetaResolver.java<|end_file_name|><|fim▁begin|>package com.ejlchina.searcher.implement;
import com.ejlchina.searcher.*;
import com.ejlchina.searcher.bean.InheritType;
import java.lang.reflect.Field;
import java.util.*;
import java.lang.reflect.Modifier;
import java.util.concurrent.ConcurrentHashMap;
/***
 * Default bean metadata resolver
* @author Troy.Zhou @ 2021-10-30
* @since v3.0.0
*/
public class DefaultMetaResolver implements MetaResolver {
<|fim▁hole|> private SnippetResolver snippetResolver = new DefaultSnippetResolver();
private DbMapping dbMapping;
public DefaultMetaResolver() {
this(new DefaultDbMapping());
}
public DefaultMetaResolver(DbMapping dbMapping) {
this.dbMapping = dbMapping;
}
@Override
public <T> BeanMeta<T> resolve(Class<T> beanClass) {
@SuppressWarnings("unchecked")
BeanMeta<T> beanMeta = (BeanMeta<T>) cache.get(beanClass);
if (beanMeta != null) {
return beanMeta;
}
synchronized (cache) {
beanMeta = resolveMetadata(beanClass);
cache.put(beanClass, beanMeta);
return beanMeta;
}
}
protected <T> BeanMeta<T> resolveMetadata(Class<T> beanClass) {
DbMapping.Table table = dbMapping.table(beanClass);
if (table == null) {
throw new SearchException("The class [" + beanClass.getName() + "] can not be searched, because it can not be resolved by " + dbMapping.getClass());
}
BeanMeta<T> beanMeta = new BeanMeta<>(beanClass, table.getDataSource(),
snippetResolver.resolve(table.getTables()),
snippetResolver.resolve(table.getJoinCond()),
snippetResolver.resolve(table.getGroupBy()),
table.isDistinct());
        // Resolve the bean's searchable fields
Field[] fields = getBeanFields(beanClass);
for (int index = 0; index < fields.length; index++) {
Field field = fields[index];
if (Modifier.isStatic(field.getModifiers())) {
continue;
}
DbMapping.Column column = dbMapping.column(beanClass, fields[index]);
if (column == null) {
continue;
}
field.setAccessible(true);
SqlSnippet snippet = snippetResolver.resolve(column.getFieldSql());
            // Note: Oracle does not allow column aliases that start with an underscore
FieldMeta fieldMeta = new FieldMeta(beanMeta, field, snippet, "c_" + index,
column.isConditional(), column.getOnlyOn());
beanMeta.addFieldMeta(field.getName(), fieldMeta);
}
if (beanMeta.getFieldCount() == 0) {
throw new SearchException("[" + beanClass.getName() + "] is not a valid SearchBean, because there is no field mapping to database.");
}
return beanMeta;
}
protected Field[] getBeanFields(Class<?> beanClass) {
InheritType iType = dbMapping.inheritType(beanClass);
List<Field> fieldList = new ArrayList<>();
Set<String> fieldNames = new HashSet<>();
while (beanClass != Object.class) {
for (Field field : beanClass.getDeclaredFields()) {
String name = field.getName();
int modifiers = field.getModifiers();
if (field.isSynthetic() || Modifier.isStatic(modifiers)
|| Modifier.isTransient(modifiers)
|| fieldNames.contains(name)) {
continue;
}
fieldList.add(field);
fieldNames.add(name);
}
if (iType != InheritType.FIELD && iType != InheritType.ALL) {
break;
}
beanClass = beanClass.getSuperclass();
}
return fieldList.toArray(new Field[0]);
}
public SnippetResolver getSnippetResolver() {
return snippetResolver;
}
public void setSnippetResolver(SnippetResolver snippetResolver) {
this.snippetResolver = Objects.requireNonNull(snippetResolver);
}
public DbMapping getDbMapping() {
return dbMapping;
}
public void setDbMapping(DbMapping dbMapping) {
this.dbMapping = Objects.requireNonNull(dbMapping);
}
}<|fim▁end|> | private final Map<Class<?>, BeanMeta<?>> cache = new ConcurrentHashMap<>();
|
<|file_name|>test_dataset.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Unit tests for the Dataset.py module'''
import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt
class TestDatasetAttributes(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.variable = 'prec'
self.name = 'foo'
self.origin = {'path': '/a/fake/file/path'}
self.test_dataset = Dataset(self.lat,
self.lon,
self.time,
self.value,
variable=self.variable,
name=self.name,
origin=self.origin)
def test_lats(self):
self.assertItemsEqual(self.test_dataset.lats, self.lat)
def test_lons(self):
self.assertItemsEqual(self.test_dataset.lons, self.lon)
def test_times(self):
self.assertItemsEqual(self.test_dataset.times, self.time)
def test_values(self):
self.assertEqual(self.test_dataset.values.all(), self.value.all())
def test_variable(self):
self.assertEqual(self.test_dataset.variable, self.variable)
def test_name(self):
self.assertEqual(self.test_dataset.name, self.name)
def test_origin(self):
self.assertEqual(self.test_dataset.origin, self.origin)
class TestInvalidDatasetInit(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.values_in_wrong_order = flat_array.reshape(5, 5, 12)
def test_bad_lat_shape(self):
self.lat = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_lon_shape(self):
self.lon = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_times_shape(self):
self.time = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_values_shape(self):
self.value = np.array([1, 2, 3, 4, 5])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_shape_mismatch(self):
# If we change lats to this the shape of value will not match
# up with the length of the lats array.
self.lat = self.lat[:-2]
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_given_in_wrong_order(self):
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.values_in_wrong_order)
def test_lons_values_incorrectly_gridded(self):
times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
lats = np.arange(-30, 30)
bad_lons = np.arange(360)
flat_array = np.arange(len(times) * len(lats) * len(bad_lons))
values = flat_array.reshape(len(times), len(lats), len(bad_lons))
ds = Dataset(lats, bad_lons, times, values)
np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))
def test_reversed_lats(self):
ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
np.testing.assert_array_equal(ds.lats, self.lat)
class TestDatasetFunctions(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.variable = 'prec'
self.test_dataset = Dataset(self.lat, self.lon, self.time,
self.value, self.variable)
def test_spatial_boundaries(self):
self.assertEqual(
self.test_dataset.spatial_boundaries(),
(min(self.lat), max(self.lat), min(self.lon), max(self.lon)))
def test_time_range(self):
self.assertEqual(
self.test_dataset.time_range(),
(dt.datetime(2000, 1, 1), dt.datetime(2000, 12, 1)))
def test_spatial_resolution(self):
self.assertEqual(self.test_dataset.spatial_resolution(), (2, 2))
def test_temporal_resolution(self):
self.assertEqual(self.test_dataset.temporal_resolution(), 'monthly')
class TestBounds(unittest.TestCase):
def setUp(self):
self.bounds = Bounds(-80, 80, # Lats
-160, 160, # Lons
dt.datetime(2000, 1, 1), # Start time
dt.datetime(2002, 1, 1)) # End time
# Latitude tests
def test_inverted_min_max_lat(self):
with self.assertRaises(ValueError):
self.bounds.lat_min = 81
with self.assertRaises(ValueError):
self.bounds.lat_max = -81
# Lat Min
def test_out_of_bounds_lat_min(self):
with self.assertRaises(ValueError):
self.bounds.lat_min = -91
with self.assertRaises(ValueError):
self.bounds.lat_min = 91
# Lat Max
def test_out_of_bounds_lat_max(self):
with self.assertRaises(ValueError):
self.bounds.lat_max = -91
with self.assertRaises(ValueError):
self.bounds.lat_max = 91
# Longitude tests
    def test_inverted_min_max_lon(self):
with self.assertRaises(ValueError):
self.bounds.lon_min = 161
with self.assertRaises(ValueError):
self.bounds.lon_max = -161
# Lon Min
def test_out_of_bounds_lon_min(self):
with self.assertRaises(ValueError):
self.bounds.lon_min = -181
with self.assertRaises(ValueError):
self.bounds.lon_min = 181
# Lon Max
def test_out_of_bounds_lon_max(self):
with self.assertRaises(ValueError):
self.bounds.lon_max = -181
with self.assertRaises(ValueError):
self.bounds.lon_max = 181
# Temporal tests
def test_inverted_start_end_times(self):
with self.assertRaises(ValueError):
self.bounds.start = dt.datetime(2003, 1, 1)
with self.assertRaises(ValueError):
self.bounds.end = dt.datetime(1999, 1, 1)
# Start tests
def test_invalid_start(self):
with self.assertRaises(ValueError):<|fim▁hole|> self.bounds.start = "This is not a date time object"
# End tests
def test_invalid_end(self):
with self.assertRaises(ValueError):
self.bounds.end = "This is not a date time object"
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>controller_ref_manager.go<|end_file_name|><|fim▁begin|>package deploymentconfig
import (
"fmt"
"github.com/golang/glog"
kerrors "k8s.io/apimachinery/pkg/api/errors"
kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
kschema "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
kutilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
kcontroller "k8s.io/kubernetes/pkg/controller"
)
// RCControlInterface is an interface that knows how to patch
// ReplicationControllers. It is used
// by the DeploymentConfig controller to ease testing of actions that it takes.
type RCControlInterface interface {
PatchReplicationController(namespace, name string, data []byte) error
}
// RealRCControl is the default implementation of RCControlInterface.
type RealRCControl struct {
KubeClient kclientset.Interface
Recorder record.EventRecorder
}
// To make sure RealRCControl implements RCControlInterface
var _ RCControlInterface = &RealRCControl{}
// PatchReplicationController executes a strategic merge patch contained in 'data' on RC specified by 'namespace' and 'name'
func (r RealRCControl) PatchReplicationController(namespace, name string, data []byte) error {
_, err := r.KubeClient.Core().ReplicationControllers(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}
type RCControllerRefManager struct {
kcontroller.BaseControllerRefManager
controllerKind kschema.GroupVersionKind
rcControl RCControlInterface
}
<|fim▁hole|>// methods to manage the controllerRef of ReplicationControllers.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// RCControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewRCControllerRefManager(
rcControl RCControlInterface,
controller kmetav1.Object,
selector klabels.Selector,
controllerKind kschema.GroupVersionKind,
canAdopt func() error,
) *RCControllerRefManager {
return &RCControllerRefManager{
BaseControllerRefManager: kcontroller.BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rcControl: rcControl,
}
}
// ClaimReplicationController tries to take ownership of a ReplicationController.
//
// It will reconcile the following:
// * Adopt the ReplicationController if it's an orphan.
// * Release owned ReplicationController if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
func (m *RCControllerRefManager) ClaimReplicationController(rc *v1.ReplicationController) (bool, error) {
match := func(obj kmetav1.Object) bool {
return m.Selector.Matches(klabels.Set(obj.GetLabels()))
}
adopt := func(obj kmetav1.Object) error {
return m.AdoptReplicationController(obj.(*v1.ReplicationController))
}
release := func(obj kmetav1.Object) error {
return m.ReleaseReplicationController(obj.(*v1.ReplicationController))
}
return m.ClaimObject(rc, match, adopt, release)
}
// ClaimReplicationControllers tries to take ownership of a list of ReplicationControllers.
//
// It will reconcile the following:
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicationControllers that you now own is
// returned.
func (m *RCControllerRefManager) ClaimReplicationControllers(rcs []*v1.ReplicationController) ([]*v1.ReplicationController, error) {
var claimed []*v1.ReplicationController
var errlist []error
for _, rc := range rcs {
ok, err := m.ClaimReplicationController(rc)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, rc)
}
}
return claimed, kutilerrors.NewAggregate(errlist)
}
// AdoptReplicationController sends a patch to take control of the ReplicationController. It returns the error if
// the patching fails.
func (m *RCControllerRefManager) AdoptReplicationController(rs *v1.ReplicationController) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ReplicationController %s/%s (%s): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{
"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],
"uid":"%s"
}
}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), rs.UID)
return m.rcControl.PatchReplicationController(rs.Namespace, rs.Name, []byte(addControllerPatch))
}
// ReleaseReplicationController sends a patch to free the ReplicationController from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *RCControllerRefManager) ReleaseReplicationController(rc *v1.ReplicationController) error {
glog.V(4).Infof("patching ReplicationController %s/%s to remove its controllerRef to %s/%s:%s",
rc.Namespace, rc.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), rc.UID)
err := m.rcControl.PatchReplicationController(rc.Namespace, rc.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if kerrors.IsNotFound(err) {
// If the ReplicationController no longer exists, ignore it.
return nil
}
if kerrors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the ReplicationController
// has no owner reference, 2. the uid of the ReplicationController doesn't
// match, which means the ReplicationController is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}<|fim▁end|> | // NewRCControllerRefManager returns a RCControllerRefManager that exposes |
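// Editor note (hedged): a sync pass would typically build one manager per
// DeploymentConfig, e.g.
//
//	manager := NewRCControllerRefManager(rcControl, dc, selector, controllerKind, canAdopt)
//	owned, err := manager.ClaimReplicationControllers(candidates)
//
// where dc is the owning DeploymentConfig object, controllerKind its
// GroupVersionKind, and canAdopt a potentially expensive live check; all
// names here are illustrative.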
<|file_name|>parse_clinical_trials.py<|end_file_name|><|fim▁begin|>##############################################################################
# Clinical trials parser
#
# eg 2013-2016
##############################################################################
import cPickle, os, re
def main():
#base_dir = "../data/ct/"
base_dir = "/home/eguney/data/ct/"
file_name = base_dir + "ct.csv"
output_data(base_dir, file_name)
return
def output_data(base_dir, file_name):
drug_to_ctids = get_interventions(base_dir, include_other_names=True) #False)
print len(drug_to_ctids), drug_to_ctids.items()[:5]
ctid_to_conditions = get_ctid_to_conditions(base_dir)
print len(ctid_to_conditions), ctid_to_conditions.items()[:5]
ctid_to_values = get_ctid_to_details(base_dir)
print len(ctid_to_values), ctid_to_values.items()[:5]
f = open(file_name, 'w')
f.write("Drug\tClinical trial Id\tPhase\tStatus\tFDA regulated\tWhy stopped\tResults date\tConditions\n")
for drug, ctids in drug_to_ctids.iteritems():
for ctid in ctids:
values = [ drug, ctid ]
if ctid in ctid_to_values:
#phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
values.extend(ctid_to_values[ctid])
if ctid in ctid_to_conditions:
conditions = ctid_to_conditions[ctid]
values.append(" | ".join(conditions))
f.write("%s\n" % "\t".join(values))
f.close()
return
def get_disease_specific_drugs(drug_to_diseases, phenotype_to_mesh_id):
disease_to_drugs = {}
mesh_id_to_phenotype = {}
for phenotype, mesh_id in phenotype_to_mesh_id.items():
mesh_id_to_phenotype[mesh_id] = phenotype
for drugbank_id, diseases in drug_to_diseases.iteritems():
for phenotype, dui, val in diseases:
if val > 0:
if dui in mesh_id_to_phenotype: # In the disease data set
disease = mesh_id_to_phenotype[dui].lower()
disease_to_drugs.setdefault(disease, set()).add(drugbank_id)
return disease_to_drugs
def get_drug_disease_mapping(base_dir, selected_drugs, name_to_drug, synonym_to_drug, mesh_id_to_name, mesh_id_to_name_with_synonyms, dump_file):
if os.path.exists(dump_file):
drug_to_diseases = cPickle.load(open(dump_file))
return drug_to_diseases
# Get mesh name to mesh id mapping
mesh_name_to_id = {}
for mesh_id, names in mesh_id_to_name_with_synonyms.iteritems():
for name in names:
for name_mod in [ name, name.replace(",", ""), name.replace("-", " "), name.replace(",", "").replace("-", " ") ]:
mesh_name_to_id[name_mod] = mesh_id
# Get CT info
drug_to_ctids, ctid_to_conditions, ctid_to_values = get_ct_data(base_dir, include_other_names=True)
# Get CT - MeSH disease mapping
intervention_to_mesh_name = {}
interventions = reduce(lambda x,y: x|y, ctid_to_conditions.values())
for intervention in interventions:
if intervention.endswith('s'):
intervention = intervention[:-1]
idx = intervention.find("(")
if idx != -1:
intervention = intervention[:idx].rstrip()
try:
exp = re.compile(r"\b%ss{,1}\b" % re.escape(intervention))
except:
print "Problem with regular expression:", intervention
for mesh_name, dui in mesh_name_to_id.iteritems():
m = exp.search(mesh_name)
if m is None:
continue
elif len(mesh_name.split()) != len(intervention.split()): # no partial overlap
continue
phenotype = mesh_id_to_name[dui]
intervention_to_mesh_name[intervention] = phenotype
break
#print len(intervention_to_mesh_name), intervention_to_mesh_name.items()[:5]
# Get interventions
phase_to_value = { "Phase 0": 0.5, "Phase 1": 0.6, "Phase 1/Phase 2": 0.65, "Phase 2": 0.7, "Phase 2/Phase 3": 0.75, "Phase 3": 0.8, "Phase 3/Phase 4":0.85, "Phase 4": 0.9, "N/A": 0.5 }
status_to_value = { "Terminated": -0.5, "Withdrawn": -1} #,"Completed", "Recruiting", "Not yet recruiting"
drug_to_diseases = {}
drug_to_diseases_n_study = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
if selected_drugs is not None and drugbank_id not in selected_drugs:
continue
phenotype_to_count = {}
for ctid in ctids:
phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
val = 0.5
if phase not in phase_to_value:
print "Unknown phase:", phase
if status in status_to_value and phase in phase_to_value:
val = phase_to_value[phase] - 0.1
for intervention in ctid_to_conditions[ctid]:
if intervention not in intervention_to_mesh_name:
continue
phenotype = intervention_to_mesh_name[intervention]
i = phenotype_to_count.setdefault(phenotype, 0)
phenotype_to_count[phenotype] = i + 1
dui = mesh_name_to_id[phenotype]
# Phase based value assignment
drug_to_diseases.setdefault(drugbank_id, set()).add((phenotype, dui, val))
# Number of study based value assignment
for phenotype, val in phenotype_to_count.iteritems():
dui = mesh_name_to_id[phenotype]
drug_to_diseases_n_study.setdefault(drugbank_id, set()).add((phenotype, dui, val))
#drug_to_diseases = drug_to_diseases_n_study
#print "Non matching drugs:", len(non_matching_drugs)
#print len(drug_to_diseases), drug_to_diseases.items()[:5]
cPickle.dump(drug_to_diseases, open(dump_file, 'w'))
return drug_to_diseases
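# Editor worked example (hedged): with the mappings above, a "Phase 3" trial
# that was "Terminated" contributes phase_to_value["Phase 3"] - 0.1 = 0.7,
# while trials whose status is not in status_to_value (e.g. "Completed") keep
# the default score of 0.5 in this implementation.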
def get_ct_data(base_dir, include_other_names=True, dump_file=None):
if dump_file is not None and os.path.exists(dump_file):
values = cPickle.load(open(dump_file))
#drug_to_ctids, ctid_to_conditions, ctid_to_values = values
return values
drug_to_ctids = get_interventions(base_dir, include_other_names)
ctid_to_conditions = get_ctid_to_conditions(base_dir)
ctid_to_values = get_ctid_to_details(base_dir)
values = drug_to_ctids, ctid_to_conditions, ctid_to_values
if dump_file is not None:
cPickle.dump(values, open(dump_file, 'w'))
return values
def get_ctid_to_conditions(base_dir):
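    """Return NCT id -> set of condition names, read from conditions.txt
    and condition_browse.txt."""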
condition_file = base_dir + "conditions.txt"
condition_file2 = base_dir + "condition_browse.txt"
# Get conditions
ctid_to_conditions = {}
f = open(condition_file)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
    # Also read MeSH-matched conditions from condition_browse.txt
    f = open(condition_file2)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
def get_ctid_to_details(base_dir):
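    """Return NCT id -> [phase, status, fda_regulated, why_stopped,
    results_date] from clinical_study.txt; records may span several lines,
    so lines are accumulated until the next NCT id is seen."""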
study_file = base_dir + "clinical_study.txt" # _noclob
# Get phase etc information
f = open(study_file)
line = f.readline()
words = line.strip().split("|")
header_to_idx = dict((word.lower(), i) for i, word in enumerate(words))
text = None
ctid_to_values = {}
while line:
line = f.readline()
if line.startswith("NCT"):
if text is not None:
words = text.strip().split("|")
ctid = words[0]
try:
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
fda_regulated = words[header_to_idx["is_fda_regulated"]]
why_stopped = words[header_to_idx["why_stopped"]]
results_date = words[header_to_idx["firstreceived_results_date"]]
except:
print words
return
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
text = line
else:
text += line
f.close()
    # Handle the last accumulated record
    if text is not None:
        words = text.strip().split("|")
        ctid = words[0]
        phase = words[header_to_idx["phase"]]
        status = words[header_to_idx["overall_status"]]
        fda_regulated = words[header_to_idx["is_fda_regulated"]]
        why_stopped = words[header_to_idx["why_stopped"]]
        results_date = words[header_to_idx["firstreceived_results_date"]]
        if phase.strip() != "":
            ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
    return ctid_to_values
def get_interventions(base_dir, include_other_names=True):
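    """Return drug name -> set of NCT ids, keeping only Drug and Biological
    interventions; if include_other_names is set, browse terms and other
    intervention names are indexed as well."""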
#ctid_to_drugs = {}
drug_to_ctids = {}
intervention_file = base_dir + "interventions.txt"
f = open(intervention_file)
f.readline()
#prev_row = 0
ignored_intervention_types = set()
for line in f:
words = line.strip().split("|")
try:
row = int(words[0])
#if row != prev_row + 1:
# continue
except:
continue
#prev_row += 1
if len(words) < 5:
#print words
continue
        ctid = words[1]
        intervention_type = words[2]
        drug = words[3]
        drug = drug.decode("ascii", errors="ignore").encode("ascii")
        drug = drug.strip("\"'")
        if intervention_type != "Drug" and intervention_type != "Biological":
            ignored_intervention_types.add(intervention_type)
            continue
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
#conditions = drug_to_interventions.setdefault(drug, set())
#conditions |= ctid_to_conditions[ctid]
f.close()
print "Ignored intervention types:", ignored_intervention_types
if include_other_names:
intervention_file = base_dir + "intervention_browse.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[2] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
intervention_file = base_dir + "intervention_other_names.txt"
f = open(intervention_file)
f.readline()
for line in f:
words = line.strip().split("|")
row = int(words[0])
ctid = words[1]
drug = words[3] #.lower()
drug = drug.decode("ascii", errors="ignore").encode("ascii")
drug = drug.strip("\"'")
drug_to_ctids.setdefault(drug, set()).add(ctid)
#ctid_to_drugs.setdefault(ctid, set()).add(drug)
f.close()
return drug_to_ctids #ctid_to_drugs
def get_drug_to_interventions(drug_to_ctids, name_to_drug, synonym_to_drug, ctid_to_conditions):
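    """Pool the trial conditions of each drug. Returns drugbank_id -> set of
    condition names; drugs that cannot be mapped to DrugBank are skipped."""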
drug_to_interventions = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]<|fim▁hole|> non_matching_drugs.add(drug)
continue
values = set()
for ctid in ctids:
#if ctid_to_values[ctid][0] != "Phase 3":
# continue
values |= ctid_to_conditions[ctid]
if len(values) == 0:
continue
        drug_to_interventions.setdefault(drugbank_id, set()).update(values)
#print "Non matching drugs:", len(non_matching_drugs)
#phenotypes = disease_to_drugs.keys()
#disease_to_interventions = {}
#for drug, interventions in drug_to_interventions.iteritems():
# for intervention in interventions:
# intervention = intervention.lower()
# for disease in phenotypes:
# values = text_utilities.tokenize_disease_name(disease)
# if all([ intervention.find(word.strip()) != -1 for word in values ]): # disease.split(",") ]):
# disease_to_drugs_ct.setdefault(disease, set()).add(drug)
# disease_to_interventions.setdefault(disease, set()).add(intervention)
#for disease, interventions in disease_to_interventions.iteritems():
# print disease, interventions
#print len(drug_to_interventions), drug_to_interventions.items()[:5]
    #print drug_to_ctids["voriconazole"]
    #print ctid_to_conditions["NCT00005912"]
    #print ctid_to_values["NCT00005912"]
#print drug_to_interventions["DB00582"]
return drug_to_interventions
def get_frequent_interventions(drug_to_interventions):
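    """Count how many drugs are tested for each condition and return
    (count, condition) pairs sorted by decreasing frequency."""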
condition_to_count = {}
for drug, interventions in drug_to_interventions.iteritems():
for condition in interventions:
if condition in condition_to_count:
condition_to_count[condition] += 1
else:
condition_to_count[condition] = 1
values = []
for condition, count in condition_to_count.iteritems():
values.append((count, condition))
values.sort()
values.reverse()
#print values[:50]
return values
if __name__ == "__main__":
main()<|fim▁end|> | elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else: |
<|file_name|>example_test.go<|end_file_name|><|fim▁begin|>package compress_test
import (
"io"
"net/http"
"github.com/mkch/burrow/compress"
)<|fim▁hole|>}
func ExampleNewResponseWriter() {
handler := func(w http.ResponseWriter, r *http.Request) {
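		// NOTE: the error returned by NewResponseWriter is ignored here
		// only to keep the example short; real handlers should check it.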
cw, _ := compress.NewResponseWriter(w, compress.DefaultGzipWriterFactory)
io.WriteString(cw, "content to write")
}
http.ListenAndServe(":8080", http.HandlerFunc(handler))
}<|fim▁end|> |
func ExampleNewHandler() {
http.ListenAndServe(":8080", compress.NewHandler(http.DefaultServeMux, nil)) |
<|file_name|>api_op_DeprovisionIpamPoolCidr.go<|end_file_name|><|fim▁begin|>// Code generated by smithy-go-codegen DO NOT EDIT.
package ec2
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from
// a pool that has a source pool, the CIDR is recycled back into the source pool.
// For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User
// Guide.
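// Example (illustrative sketch, not generated code; assumes a configured
// Client value named "client", the aws package for aws.String, and a
// hypothetical pool ID):
//
//	out, err := client.DeprovisionIpamPoolCidr(context.TODO(), &DeprovisionIpamPoolCidrInput{
//		IpamPoolId: aws.String("ipam-pool-0123456789abcdef0"),
//		Cidr:       aws.String("10.0.0.0/24"),
//	})
//	if err != nil {
//		// handle error
//	}
//	_ = out.IpamPoolCidr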
func (c *Client) DeprovisionIpamPoolCidr(ctx context.Context, params *DeprovisionIpamPoolCidrInput, optFns ...func(*Options)) (*DeprovisionIpamPoolCidrOutput, error) {
if params == nil {
params = &DeprovisionIpamPoolCidrInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DeprovisionIpamPoolCidr", params, optFns, c.addOperationDeprovisionIpamPoolCidrMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DeprovisionIpamPoolCidrOutput)
out.ResultMetadata = metadata
return out, nil
}
type DeprovisionIpamPoolCidrInput struct {
// The ID of the pool that has the CIDR you want to deprovision.
//
// This member is required.
IpamPoolId *string
// The CIDR which you want to deprovision from the pool.
Cidr *string
// A check for whether you have the required permissions for the action without
// actually making the request and provides an error response. If you have the
// required permissions, the error response is DryRunOperation. Otherwise, it is
// UnauthorizedOperation.
DryRun *bool
noSmithyDocumentSerde
}
type DeprovisionIpamPoolCidrOutput struct {
// The deprovisioned pool CIDR.
IpamPoolCidr *types.IpamPoolCidr
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationDeprovisionIpamPoolCidrMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpDeprovisionIpamPoolCidr{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpDeprovisionIpamPoolCidr{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}<|fim▁hole|> if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDeprovisionIpamPoolCidrValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeprovisionIpamPoolCidr(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opDeprovisionIpamPoolCidr(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "DeprovisionIpamPoolCidr",
}
}<|fim▁end|> | if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
} |
<|file_name|>map_element.cpp<|end_file_name|><|fim▁begin|>#include "platform/i_platform.h"
#include "core/map/map_element.h"
namespace map {
MapElement::~MapElement()
{
}
MapElement::MapElement( int32_t Id )
: mIdentifier( 0 )
, mId( Id )
, mSpawnedActorGUID( -1 )
{
SetNextUID();
}
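// Assigns a fresh identifier from a static, process-wide counter
// (note: not thread-safe).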
void MapElement::SetNextUID()
{
static int32_t NextUID = 0;
mUID = ++NextUID;
}
int32_t MapElement::GetIdentifier()
{
return mIdentifier;
}
void MapElement::Load( Json::Value& setters )
{
std::string identifier;
if ( Json::GetStr( setters["identifier"], identifier ) )
{
mIdentifier = AutoId( identifier );
}
}
void MapElement::Save( Json::Value& Element )
{
std::string elementName;
if ( IdStorage::Get().GetName( mId, elementName ) )
{
Element["name"] = Json::Value( elementName );
}
std::string identifierName;
if ( IdStorage::Get().GetName( mIdentifier, identifierName ) )
{
Element["identifier"] = Json::Value( identifierName );
}
}
void MapElement::SetIdentifier( int32_t uId )
{
mIdentifier = uId;
}
void MapElement::SetSpawnedActorGUID( int32_t spawnedActorGUID )
{
mSpawnedActorGUID = spawnedActorGUID;
}
int32_t MapElement::GetSpawnedActorGUID() const
{
return mSpawnedActorGUID;
}
int32_t MapElement::GetUID() const
{
return mUID;
}
MapElement& MapElement::operator=( MapElement const& other )
{
mIdentifier = other.mIdentifier;
mId = other.mId;
mSpawnedActorGUID = other.mSpawnedActorGUID;
// mUID should be unique
SetNextUID();<|fim▁hole|> : MapElement( Id )
{
}
} // namespace map<|fim▁end|> | return *this;
}
DefaultMapElement::DefaultMapElement( int32_t Id ) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright (C) 2015 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
#
"""
Create, read and write yambo input files
Read, modify and write yambo databases
Analyse results from yambo calculations
Modules:
io
- YamboIn: read, write and manipulate yambo input files
- YamboOut: read yambo output files and save in .json
dbs
- YamboSaveDB: read information in the ns.db1
- YamboLatticeDB: read lattice parameters, symmetries and k-points from ns.db1
- YamboDipolesDB: dipole matrix elements from ndb.dip*
- YamboStaticScreeningDB: static dielectric screening from ndb.em1s*
- YamboElectronsDB: read the electronic states from ns.db1
- YamboQPDB: read the quasiparticle energies db ndb.QP
- YamboGreenDB: read the green's functions calculated using yambo
bse
    - YamboExcitonWaveFunctionXSF: read the excitonic wave function in XSF format
- YamboExcitonWeight: read the excitonic weights from the ypp output file
- YamboBSEAbsorptionSpectra: generate a .json file with the bse absorption calculation (including information about the excitons)
analyse:
- YamboAnalyser: read .json files generated with yamboout and plot them together
- recipes: user contributed scripts
"""
import numpy as np
from yambopy.jsonencoder import *
from yambopy.plot import *
from yambopy.units import *
#lattice stuff
from yambopy.lattice import *
#yambo databases
from yambopy.dbs.savedb import *
from yambopy.dbs.dipolesdb import *
from yambopy.dbs.qpdb import *
from yambopy.dbs.em1sdb import *
from yambopy.dbs.greendb import *<|fim▁hole|>from yambopy.dbs.rtdb import *
from yambopy.dbs.excitondb import *
from yambopy.dbs.wfdb import *
from yambopy.dbs.elphondb import *
#input/output files
from yambopy.io.inputfile import *
from yambopy.io.outputfile import *
#bse/excitons files
from yambopy.bse.excitonwf import *
from yambopy.bse.excitonweight import *
from yambopy.bse.bse_absorption import *
#analyse stuff
from yambopy.analyse import *
from yambopy.recipes import *<|fim▁end|> | from yambopy.dbs.latticedb import *
from yambopy.dbs.electronsdb import * |