prompt: large_string, lengths 70–991k
completion: large_string, lengths 0–1.02k
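The rows below pair a fill-in-the-middle (FIM) prompt (a source file containing <|file_name|>, <|fim▁begin|>, <|fim▁hole|>, and <|fim▁end|> markers) with the completion text that belongs at the hole. The following is a minimal sketch of how such a record could be reassembled into plain source, assuming each record is a (prompt, completion) pair of strings and the marker strings appear exactly as printed below; the function and variable names are illustrative, not part of the dataset.

```python
# Minimal sketch (assumption: records are (prompt, completion) string pairs using the
# marker strings shown in the rows below; helper names here are illustrative only).

FILE_NAME = "<|file_name|>"
END_FILE_NAME = "<|end_file_name|>"
FIM_BEGIN = "<|fim\u2581begin|>"  # "\u2581" is the "▁" character used in the markers
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"


def reassemble(prompt: str, completion: str) -> tuple[str, str]:
    """Return (file_name, full_source) for one FIM record."""
    # The file name sits between <|file_name|> and <|end_file_name|>.
    name_start = prompt.index(FILE_NAME) + len(FILE_NAME)
    name_end = prompt.index(END_FILE_NAME, name_start)
    file_name = prompt[name_start:name_end]

    # The code body sits between the begin and end markers; the hole marker shows
    # where the completion text was cut out.
    body_start = prompt.index(FIM_BEGIN, name_end) + len(FIM_BEGIN)
    body_end = prompt.index(FIM_END, body_start)
    prefix, _, suffix = prompt[body_start:body_end].partition(FIM_HOLE)

    return file_name, prefix + completion + suffix


if __name__ == "__main__":
    demo_prompt = (
        f"{FILE_NAME}hello.py{END_FILE_NAME}"
        f"{FIM_BEGIN}def greet():\n    {FIM_HOLE}\n\ngreet()\n{FIM_END}"
    )
    name, source = reassemble(demo_prompt, 'print("hello")')
    print(name)
    print(source)
```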
<|file_name|>PipeItemsSterlingSilver.java<|end_file_name|><|fim▁begin|>package ua.pp.shurgent.tfctech.integration.bc.blocks.pipes.transport; import net.minecraft.item.Item; import net.minecraftforge.common.util.ForgeDirection;<|fim▁hole|>import ua.pp.shurgent.tfctech.integration.bc.BCStuff; import ua.pp.shurgent.tfctech.integration.bc.ModPipeIconProvider; import ua.pp.shurgent.tfctech.integration.bc.blocks.pipes.handlers.PipeItemsInsertionHandler; import buildcraft.api.core.IIconProvider; import buildcraft.transport.pipes.PipeItemsQuartz; import buildcraft.transport.pipes.events.PipeEventItem; public class PipeItemsSterlingSilver extends PipeItemsQuartz { public PipeItemsSterlingSilver(Item item) { super(item); } @Override public IIconProvider getIconProvider() { return BCStuff.pipeIconProvider; } @Override public int getIconIndex(ForgeDirection direction) { return ModPipeIconProvider.TYPE.PipeItemsSterlingSilver.ordinal(); } public void eventHandler(PipeEventItem.AdjustSpeed event) { super.eventHandler(event); } public void eventHandler(PipeEventItem.Entered event) { event.item.setInsertionHandler(PipeItemsInsertionHandler.INSTANCE); } }<|fim▁end|>
<|file_name|>rpcmining.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2012 The Bitcoin developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "main.h" #include "db.h" #include "init.h" #include "bitcoinrpc.h" using namespace json_spirit; using namespace std; // Return average network hashes per second based on the last 'lookup' blocks, // or from the last difficulty change if 'lookup' is nonpositive. // If 'height' is nonnegative, compute the estimate at the time when a given block was found. Value GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = pindexBest; if (height >= 0 && height < nBestHeight) pb = FindBlockByHeight(height); if (pb == NULL || !pb->nHeight) return 0; // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) lookup = pb->nHeight % 2016 + 1; // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) lookup = pb->nHeight; CBlockIndex *pb0 = pb; int64 minTime = pb0->GetBlockTime(); int64 maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64 time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a divide by zero exception. if (minTime == maxTime) return 0; uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64 timeDiff = maxTime - minTime; return (boost::int64_t)(workDiff.getdouble() / timeDiff); } Value getnetworkhashps(const Array& params, bool fHelp) { if (fHelp || params.size() > 2) throw runtime_error( "getnetworkhashps [blocks] [height]\n" "Returns the estimated network hashes per second based on the last 120 blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a certain block was found."); return GetNetworkHashPS(params.size() > 0 ? params[0].get_int() : 120, params.size() > 1 ? params[1].get_int() : -1); } // Key used by getwork/getblocktemplate miners. // Allocated in InitRPCMining, free'd in ShutdownRPCMining static CReserveKey* pMiningKey = NULL; void InitRPCMining() { if (!pwalletMain) return; // getwork/getblocktemplate mining rewards paid here: pMiningKey = new CReserveKey(pwalletMain); } void ShutdownRPCMining() { if (!pMiningKey) return; delete pMiningKey; pMiningKey = NULL; } Value getgenerate(const Array& params, bool fHelp) { if (fHelp || params.size() != 0) throw runtime_error( "getgenerate\n" "Returns true or false."); if (!pMiningKey) return false; return GetBoolArg("-gen"); } Value setgenerate(const Array& params, bool fHelp) { if (fHelp || params.size() < 1 || params.size() > 2) throw runtime_error( "setgenerate <generate> [genproclimit]\n" "<generate> is true or false to turn generation on or off.\n" "Generation is limited to [genproclimit] processors, -1 is unlimited."); bool fGenerate = true; if (params.size() > 0) fGenerate = params[0].get_bool(); if (params.size() > 1) { int nGenProcLimit = params[1].get_int(); mapArgs["-genproclimit"] = itostr(nGenProcLimit); if (nGenProcLimit == 0) fGenerate = false; } mapArgs["-gen"] = (fGenerate ? 
"1" : "0"); assert(pwalletMain != NULL); GenerateBitcoins(fGenerate, pwalletMain); return Value::null; } Value gethashespersec(const Array& params, bool fHelp) { if (fHelp || params.size() != 0) throw runtime_error( "gethashespersec\n" "Returns a recent hashes per second performance measurement while generating."); if (GetTimeMillis() - nHPSTimerStart > 8000) return (boost::int64_t)0; return (boost::int64_t)dHashesPerSec; } Value getmininginfo(const Array& params, bool fHelp) { if (fHelp || params.size() != 0) throw runtime_error( "getmininginfo\n" "Returns an object containing mining-related information."); Object obj; obj.push_back(Pair("blocks", (int)nBestHeight)); obj.push_back(Pair("currentblocksize",(uint64_t)nLastBlockSize)); obj.push_back(Pair("currentblocktx",(uint64_t)nLastBlockTx)); obj.push_back(Pair("difficulty", (double)GetDifficulty())); obj.push_back(Pair("errors", GetWarnings("statusbar"))); obj.push_back(Pair("generate", GetBoolArg("-gen"))); obj.push_back(Pair("genproclimit", (int)GetArg("-genproclimit", -1))); obj.push_back(Pair("hashespersec", gethashespersec(params, false))); obj.push_back(Pair("networkhashps", getnetworkhashps(params, false)));<|fim▁hole|> obj.push_back(Pair("pooledtx", (uint64_t)mempool.size())); obj.push_back(Pair("testnet", fTestNet)); return obj; } Value getworkex(const Array& params, bool fHelp) { if (fHelp || params.size() > 2) throw runtime_error( "getworkex [data, coinbase]\n" "If [data, coinbase] is not specified, returns extended work data.\n" ); if (vNodes.empty()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Corecoin is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Corecoin is downloading blocks..."); typedef map<uint256, pair<CBlock*, CScript> > mapNewBlock_t; static mapNewBlock_t mapNewBlock; // FIXME: thread safety static vector<CBlockTemplate*> vNewBlockTemplate; static CReserveKey reservekey(pwalletMain); if (params.size() == 0) { // Update block static unsigned int nTransactionsUpdatedLast; static CBlockIndex* pindexPrev; static int64 nStart; static CBlockTemplate* pblocktemplate; if (pindexPrev != pindexBest || (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)) { if (pindexPrev != pindexBest) { // Deallocate old blocks since they're obsolete now mapNewBlock.clear(); BOOST_FOREACH(CBlockTemplate* pblocktemplate, vNewBlockTemplate) delete pblocktemplate; vNewBlockTemplate.clear(); } // Clear pindexPrev so future getworks make a new block, despite any failures from here on pindexPrev = NULL; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = nTransactionsUpdated; CBlockIndex* pindexPrevNew = pindexBest; nStart = GetTime(); // Create new block pblocktemplate = CreateNewBlockWithKey(*pMiningKey); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); vNewBlockTemplate.push_back(pblocktemplate); // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } CBlock* pblock = &pblocktemplate->block; // pointer for convenience // Update nTime pblock->UpdateTime(pindexPrev); pblock->nNonce = 0; // Update nExtraNonce static unsigned int nExtraNonce = 0; IncrementExtraNonce(pblock, pindexPrev, nExtraNonce); // Save mapNewBlock[pblock->hashMerkleRoot] = make_pair(pblock, pblock->vtx[0].vin[0].scriptSig); // Pre-build hash buffers char pmidstate[32]; char pdata[128]; char phash1[64]; FormatHashBuffers(pblock, pmidstate, pdata, phash1); uint256 hashTarget = 
CBigNum().SetCompact(pblock->nBits).getuint256(); CTransaction coinbaseTx = pblock->vtx[0]; std::vector<uint256> merkle = pblock->GetMerkleBranch(0); Object result; result.push_back(Pair("data", HexStr(BEGIN(pdata), END(pdata)))); result.push_back(Pair("target", HexStr(BEGIN(hashTarget), END(hashTarget)))); CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << coinbaseTx; result.push_back(Pair("coinbase", HexStr(ssTx.begin(), ssTx.end()))); Array merkle_arr; BOOST_FOREACH(uint256 merkleh, merkle) { printf("%s\n", merkleh.ToString().c_str()); merkle_arr.push_back(HexStr(BEGIN(merkleh), END(merkleh))); } result.push_back(Pair("merkle", merkle_arr)); return result; } else { // Parse parameters vector<unsigned char> vchData = ParseHex(params[0].get_str()); vector<unsigned char> coinbase; if(params.size() == 2) coinbase = ParseHex(params[1].get_str()); if (vchData.size() != 128) throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter"); CBlock* pdata = (CBlock*)&vchData[0]; // Byte reverse for (int i = 0; i < 128/4; i++) ((unsigned int*)pdata)[i] = ByteReverse(((unsigned int*)pdata)[i]); // Get saved block if (!mapNewBlock.count(pdata->hashMerkleRoot)) return false; CBlock* pblock = mapNewBlock[pdata->hashMerkleRoot].first; pblock->nTime = pdata->nTime; pblock->nNonce = pdata->nNonce; if(coinbase.size() == 0) pblock->vtx[0].vin[0].scriptSig = mapNewBlock[pdata->hashMerkleRoot].second; else CDataStream(coinbase, SER_NETWORK, PROTOCOL_VERSION) >> pblock->vtx[0]; pblock->hashMerkleRoot = pblock->BuildMerkleTree(); return CheckWork(pblock, *pwalletMain, reservekey); } } Value getwork(const Array& params, bool fHelp) { if (fHelp || params.size() > 1) throw runtime_error( "getwork [data]\n" "If [data] is not specified, returns formatted hash data to work on:\n" " \"midstate\" : precomputed hash state after hashing the first half of the data (DEPRECATED)\n" // deprecated " \"data\" : block data\n" " \"hash1\" : formatted hash buffer for second hash (DEPRECATED)\n" // deprecated " \"target\" : little endian hash target\n" "If [data] is specified, tries to solve the block and returns true if it was successful."); if (vNodes.empty()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Corecoin is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Corecoin is downloading blocks..."); typedef map<uint256, pair<CBlock*, CScript> > mapNewBlock_t; static mapNewBlock_t mapNewBlock; // FIXME: thread safety static vector<CBlockTemplate*> vNewBlockTemplate; if (params.size() == 0) { // Update block static unsigned int nTransactionsUpdatedLast; static CBlockIndex* pindexPrev; static int64 nStart; static CBlockTemplate* pblocktemplate; if (pindexPrev != pindexBest || (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)) { if (pindexPrev != pindexBest) { // Deallocate old blocks since they're obsolete now mapNewBlock.clear(); BOOST_FOREACH(CBlockTemplate* pblocktemplate, vNewBlockTemplate) delete pblocktemplate; vNewBlockTemplate.clear(); } // Clear pindexPrev so future getworks make a new block, despite any failures from here on pindexPrev = NULL; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = nTransactionsUpdated; CBlockIndex* pindexPrevNew = pindexBest; nStart = GetTime(); // Create new block pblocktemplate = CreateNewBlockWithKey(*pMiningKey); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); vNewBlockTemplate.push_back(pblocktemplate); // Need to update only after 
we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } CBlock* pblock = &pblocktemplate->block; // pointer for convenience // Update nTime pblock->UpdateTime(pindexPrev); pblock->nNonce = 0; // Update nExtraNonce static unsigned int nExtraNonce = 0; IncrementExtraNonce(pblock, pindexPrev, nExtraNonce); // Save mapNewBlock[pblock->hashMerkleRoot] = make_pair(pblock, pblock->vtx[0].vin[0].scriptSig); // Pre-build hash buffers char pmidstate[32]; char pdata[128]; char phash1[64]; FormatHashBuffers(pblock, pmidstate, pdata, phash1); uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256(); Object result; result.push_back(Pair("midstate", HexStr(BEGIN(pmidstate), END(pmidstate)))); // deprecated result.push_back(Pair("data", HexStr(BEGIN(pdata), END(pdata)))); result.push_back(Pair("hash1", HexStr(BEGIN(phash1), END(phash1)))); // deprecated result.push_back(Pair("target", HexStr(BEGIN(hashTarget), END(hashTarget)))); return result; } else { // Parse parameters vector<unsigned char> vchData = ParseHex(params[0].get_str()); if (vchData.size() != 128) throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter"); CBlock* pdata = (CBlock*)&vchData[0]; // Byte reverse for (int i = 0; i < 128/4; i++) ((unsigned int*)pdata)[i] = ByteReverse(((unsigned int*)pdata)[i]); // Get saved block if (!mapNewBlock.count(pdata->hashMerkleRoot)) return false; CBlock* pblock = mapNewBlock[pdata->hashMerkleRoot].first; pblock->nTime = pdata->nTime; pblock->nNonce = pdata->nNonce; pblock->vtx[0].vin[0].scriptSig = mapNewBlock[pdata->hashMerkleRoot].second; pblock->hashMerkleRoot = pblock->BuildMerkleTree(); assert(pwalletMain != NULL); return CheckWork(pblock, *pwalletMain, *pMiningKey); } } Value getblocktemplate(const Array& params, bool fHelp) { if (fHelp || params.size() > 1) throw runtime_error( "getblocktemplate [params]\n" "Returns data needed to construct a block to work on:\n" " \"version\" : block version\n" " \"previousblockhash\" : hash of current highest block\n" " \"transactions\" : contents of non-coinbase transactions that should be included in the next block\n" " \"coinbaseaux\" : data that should be included in coinbase\n" " \"coinbasevalue\" : maximum allowable input to coinbase transaction, including the generation award and transaction fees\n" " \"target\" : hash target\n" " \"mintime\" : minimum timestamp appropriate for next block\n" " \"curtime\" : current timestamp\n" " \"mutable\" : list of ways the block template may be changed\n" " \"noncerange\" : range of valid nonces\n" " \"sigoplimit\" : limit of sigops in blocks\n" " \"sizelimit\" : limit of block size\n" " \"bits\" : compressed target of next block\n" " \"height\" : height of the next block\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification."); std::string strMode = "template"; if (params.size() > 0) { const Object& oparam = params[0].get_obj(); const Value& modeval = find_value(oparam, "mode"); if (modeval.type() == str_type) strMode = modeval.get_str(); else if (modeval.type() == null_type) { /* Do nothing */ } else throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (strMode != "template") throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); if (vNodes.empty()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Corecoin is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Corecoin is downloading blocks..."); // Update block static unsigned int nTransactionsUpdatedLast; static CBlockIndex* pindexPrev; static int64 nStart; static 
CBlockTemplate* pblocktemplate; if (pindexPrev != pindexBest || (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any failures from here on pindexPrev = NULL; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = nTransactionsUpdated; CBlockIndex* pindexPrevNew = pindexBest; nStart = GetTime(); // Create new block if(pblocktemplate) { delete pblocktemplate; pblocktemplate = NULL; } CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = CreateNewBlock(scriptDummy); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } CBlock* pblock = &pblocktemplate->block; // pointer for convenience // Update nTime pblock->UpdateTime(pindexPrev); pblock->nNonce = 0; Array transactions; map<uint256, int64_t> setTxIndex; int i = 0; BOOST_FOREACH (CTransaction& tx, pblock->vtx) { uint256 txHash = tx.GetHash(); setTxIndex[txHash] = i++; if (tx.IsCoinBase()) continue; Object entry; CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << tx; entry.push_back(Pair("data", HexStr(ssTx.begin(), ssTx.end()))); entry.push_back(Pair("hash", txHash.GetHex())); Array deps; BOOST_FOREACH (const CTxIn &in, tx.vin) { if (setTxIndex.count(in.prevout.hash)) deps.push_back(setTxIndex[in.prevout.hash]); } entry.push_back(Pair("depends", deps)); int index_in_template = i - 1; entry.push_back(Pair("fee", pblocktemplate->vTxFees[index_in_template])); entry.push_back(Pair("sigops", pblocktemplate->vTxSigOps[index_in_template])); transactions.push_back(entry); } Object aux; aux.push_back(Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end()))); uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256(); static Array aMutable; if (aMutable.empty()) { aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); } Object result; result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0].vout[0].nValue)); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); result.push_back(Pair("sigoplimit", (int64_t)MAX_BLOCK_SIGOPS)); result.push_back(Pair("sizelimit", (int64_t)MAX_BLOCK_SIZE)); result.push_back(Pair("curtime", (int64_t)pblock->nTime)); result.push_back(Pair("bits", HexBits(pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1))); return result; } Value submitblock(const Array& params, bool fHelp) { if (fHelp || params.size() < 1 || params.size() > 2) throw runtime_error( "submitblock <hex data> [optional-params-obj]\n" "[optional-params-obj] parameter is currently ignored.\n" "Attempts to submit new block to network.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification."); vector<unsigned char> blockData(ParseHex(params[0].get_str())); CDataStream ssBlock(blockData, SER_NETWORK, PROTOCOL_VERSION); CBlock pblock; try { ssBlock >> pblock; } catch (std::exception &e) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } CValidationState state; bool fAccepted 
= ProcessBlock(state, NULL, &pblock); if (!fAccepted) return "rejected"; // TODO: report validation state return Value::null; }<|fim▁end|>
<|file_name|>managers.py<|end_file_name|><|fim▁begin|>from .message import * from functools import wraps import datetime import pymongo import re from app import session class Singleton(type): instance = None def __call__(cls, *args, **kwargs): if not cls.instance: cls.instance = super(Singleton, cls).__call__(*args, **kwargs) return cls.instance class APIManager(metaclass=Singleton): STATELESS_PROCESS = { '오늘의 식단': FoodMessage, '운영시간': TimeTableMessage, '학식': PupilFoodMessage, '교식': FacultyFoodMessage, # '기식': DormFoodMessage, '푸드코트': FoodCourtMessage, '스낵코너': SnackCornerMessage, '더 키친': TheKitchenMessage, '버스': BusMessage, '정문(20166)': BusFrontMessage, '베라 앞(20165)': BusBeraMessage, '중문(20169)': BusMiddleMessage, '지하철': SubMessage, '도서관': LibMessage, } PROCESS = { '내일의 식단': [ { '내일의 식단': TomorrowFoodMessage, }, { '학식': TomorrowPupilFoodMessage, '교식': TomorrowFacultyFoodMessage, # '기식': TomorrowDormFoodMessage, '푸드코트': TomorrowFoodCourtMessage, '스낵코너': TomorrowSnackCornerMessage, '더 키친': TomorrowTheKitchenMessage, }, ], # '도서관': [ # { # '도서관': LibMessage, # }, # { # # 일단 예외로 둔다 # '*': OnGoingMessage, # } # ], '식단 리뷰': [ { '식단 리뷰': ReviewInitMessage, }, { '리뷰 보기': ReviewBrowseMessage, '리뷰 남기기': ReviewPostMessage, '리뷰 삭제하기': OnGoingMessage, }, { # 리뷰 남기기 하면 3단계까지 옴 키보드로 입력받은 문자열이 오기때문에 가능성이 다양함 '*': OnGoingMessage, } ], } def handle_process(self, process, user_key, content): """ 연속되는 문답이 필요한 항목들을 처리한다. :return: Message Object """ if process == '도서관': if '열람실' in content: room = content[0] # '1 열람실 (이용률: 9.11%)'[0]하면 1만 빠져나온다 msg = LibStatMessage(room=room) UserSessionAdmin.delete(user_key) else: UserSessionAdmin.delete(user_key) return FailMessage('도서관 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.') return msg elif process == '식단 리뷰': if content in self.PROCESS[process][1]: new_msg = self.PROCESS[process][1][content] if content in ['리뷰 보기', '리뷰 삭제']: UserSessionAdmin.delete(user_key) return new_msg() else: UserSessionAdmin.delete(user_key) return ReviewPostSuccess(user_key, content) elif process == '내일의 식단': if content in self.PROCESS[process][1]: new_msg = self.PROCESS[process][1][content] UserSessionAdmin.delete(user_key) else: UserSessionAdmin.delete(user_key) return FailMessage('내일의 식단 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.') return new_msg() return FailMessage('Unhandled process {}'.format(process)) def handle_stateless_process(self, user_key, content): """ 연속적이지 않은 항목들을 처리한다. 
:param user_key: :param content: :return: Message Object """ if content in self.PROCESS: UserSessionAdmin.init_process(user_key, content) new_msg = self.PROCESS[content][0][content] return new_msg() else: new_msg = self.STATELESS_PROCESS[content] return new_msg() def get_msg(self, user_key, content): has_session = UserSessionAdmin.check_user_key(user_key) process = UserSessionAdmin.get_process(user_key) if not has_session: UserSessionAdmin.init(user_key, content) if content == '취소': UserSessionAdmin.delete(user_key) return CancelMessage() UserSessionAdmin.add_history(user_key, content) if process: return self.handle_process(process, user_key, content) else: return self.handle_stateless_process(user_key, content) def process(self, stat, req=None): if stat is 'home': home_message = HomeMessage() return home_message elif stat is 'message': content = req['content'] user_key = req['user_key'] return self.get_msg(user_key, content) elif stat is 'fail': log = req['log'] user_key = req['user_key'] fail_message = FailMessage('파악할수 없는 에러가 발생하여 해당 세션을 초기화 합니다\n{}'.format(log)) UserSessionAdmin.delete(user_key) return fail_message elif stat is 'etc': return SuccessMessage() elif stat is "scheduler": return CronUpdateMessage() elif stat is "refresh_tomorrow": return CronUpdateTomorrowMessage() else: return FailMessage("stat not in list('home', 'message', 'fail')") class SessionManager(metaclass=Singleton): @staticmethod def check_user_key(user_key): if session.find_one({'user_key': user_key}): return True else: return False def verify_session(func): @wraps(func) def session_wrapper(*args, **kwargs): user_key = args[1] if session.find_one({'user_key': user_key}): return func(*args, **kwargs) else: return False return session_wrapper def init(self, user_key, content=None, process=None): session.insert_one({ 'user_key': user_key, 'history': [content], 'process': process, }) @verify_session def delete(self, user_key): session.remove({'user_key': user_key}) @verify_session def add_history(self, user_key, content): user = session.find_one({'user_key': user_key}) history = user['history'] history.append(content) user.update({'history': history}) session.save(user) @verify_session def get_history(self, user_key): user = session.find_one({'user_key': user_key}) history = user['history'] return history[:] @verify_session def init_process(self, user_key, process): user = session.find_one({'user_key': user_key}) user.update({'process': process}) session.save(user) <|fim▁hole|> session.save(user) @verify_session def get_process(self, user_key): user = session.find_one({'user_key': user_key}) return user['process'] class DBManager: def __init__(self): _conn = pymongo.MongoClient() _food_db = _conn.food_db self.hakusiku = _food_db.hakusiku self.review = _food_db.review self.ban_list = _food_db.ban_list if self._get_black_list() is None: self.ban_list.insert_one({'black_list': []}) def get_hakusiku_data(self, date=None): date = date or datetime.date.today() date_str = date.__str__() data = self.hakusiku.find_one({'날짜': date_str}) return data def set_hakusiku_data(self, data, date=None): date = date or datetime.date.today() date_str = date.__str__() if self.get_hakusiku_data(date=date_str) is None: self.hakusiku.insert_one(data) else: self.hakusiku.replace_one({"날짜": date_str}, data) def is_banned_user(self, user_key): return True if user_key in self._get_black_list() else False def _get_black_list(self): return self.ban_list.find_one({}, {'_id': 0, 'black_list': 1}) def ban_user(self, user_key): black_list = 
self._get_black_list() black_list.append(user_key) def get_review(self): date = datetime.date.today().__str__() data = self.review.find_one({'날짜': date}) or self.init_review() return data def init_review(self): date = datetime.date.today().__str__() self.review.insert_one({ '날짜': date, '리뷰': [], }) return self.get_review() def append_review(self, user_key: str, new_review: str): def count_user_key(lst): # TODO: mongodb 기능에 count 하는게 있을듯 그걸로 대체 s = 0 for i in lst: if i.get('user_key') == user_key: s += 1 return s def remove_special_char(src): return re.sub("[!@#$%^&*()]", "", src) review = self.get_review() if count_user_key(review['리뷰']) < 5: review['리뷰'].append({'user_key': user_key, 'content': remove_special_char(new_review)}) self.review.find_one_and_replace({'날짜': datetime.date.today().__str__()}, review) else: raise Exception('5회 이상 작성하셨습니다.') APIAdmin = APIManager() UserSessionAdmin = SessionManager() DBAdmin = DBManager()<|fim▁end|>
@verify_session def expire_process(self, user_key): user = session.find_one({'user_key': user_key}) user.update({'process': None})
<|file_name|>server.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node var app = require('./app'), config = require('./config'); <|fim▁hole|> "use strict"; console.log('Express server listening on port ' + server.address().port); });<|fim▁end|>
app.set('port', process.env.PORT || config.app.port); var server = app.listen(app.get('port'), function() {
<|file_name|>nodelist.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::NodeListBinding; use dom::bindings::js::JS; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::node::{Node, NodeHelpers}; use dom::window::Window; #[deriving(Encodable)] pub enum NodeListType { Simple(~[JS<Node>]), Children(JS<Node>) } #[deriving(Encodable)] pub struct NodeList { list_type: NodeListType, reflector_: Reflector, window: JS<Window> } impl NodeList { pub fn new_inherited(window: JS<Window>, list_type: NodeListType) -> NodeList { NodeList { list_type: list_type, reflector_: Reflector::new(), window: window } } pub fn new(window: &JS<Window>, list_type: NodeListType) -> JS<NodeList> { reflect_dom_object(~NodeList::new_inherited(window.clone(), list_type), window, NodeListBinding::Wrap) } pub fn new_simple_list(window: &JS<Window>, elements: ~[JS<Node>]) -> JS<NodeList> { NodeList::new(window, Simple(elements)) } pub fn new_child_list(window: &JS<Window>, node: &JS<Node>) -> JS<NodeList> { NodeList::new(window, Children(node.clone())) } pub fn Length(&self) -> u32 { match self.list_type { Simple(ref elems) => elems.len() as u32, Children(ref node) => node.children().len() as u32 } } pub fn Item(&self, index: u32) -> Option<JS<Node>> { match self.list_type { _ if index >= self.Length() => None, Simple(ref elems) => Some(elems[index].clone()), Children(ref node) => node.children().nth(index as uint) } } pub fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<JS<Node>> { let item = self.Item(index);<|fim▁hole|> item } } impl Reflectable for NodeList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector { &mut self.reflector_ } }<|fim▁end|>
*found = item.is_some();
<|file_name|>compiler.ts<|end_file_name|><|fim▁begin|>import {Binding, resolveForwardRef, Injectable} from 'angular2/di'; import { Type, isBlank, isPresent, BaseException, normalizeBlank, stringify, isArray, isPromise } from 'angular2/src/facade/lang'; import {Promise, PromiseWrapper} from 'angular2/src/facade/async'; import {List, ListWrapper, Map, MapWrapper} from 'angular2/src/facade/collection'; import {DirectiveResolver} from './directive_resolver'; import {AppProtoView} from './view'; import {ElementBinder} from './element_binder'; import {ProtoViewRef} from './view_ref'; import {DirectiveBinding} from './element_injector'; import {TemplateResolver} from './template_resolver'; import {View} from '../annotations_impl/view'; import {ComponentUrlMapper} from './component_url_mapper'; import {ProtoViewFactory} from './proto_view_factory'; import {UrlResolver} from 'angular2/src/services/url_resolver'; import * as renderApi from 'angular2/src/render/api'; /** * Cache that stores the AppProtoView of the template of a component. * Used to prevent duplicate work and resolve cyclic dependencies. */ @Injectable() export class CompilerCache { _cache: Map<Type, AppProtoView> = new Map(); _hostCache: Map<Type, AppProtoView> = new Map(); set(component: Type, protoView: AppProtoView): void { this._cache.set(component, protoView); } get(component: Type): AppProtoView { var result = this._cache.get(component); return normalizeBlank(result); } setHost(component: Type, protoView: AppProtoView): void { this._hostCache.set(component, protoView); } getHost(component: Type): AppProtoView { var result = this._hostCache.get(component); return normalizeBlank(result); } clear(): void { this._cache.clear(); this._hostCache.clear(); } } /** * @exportedAs angular2/view */ @Injectable() export class Compiler { private _reader: DirectiveResolver; private _compilerCache: CompilerCache; private _compiling: Map<Type, Promise<AppProtoView>>; private _templateResolver: TemplateResolver; private _componentUrlMapper: ComponentUrlMapper; private _urlResolver: UrlResolver; private _appUrl: string; private _render: renderApi.RenderCompiler; private _protoViewFactory: ProtoViewFactory; constructor(reader: DirectiveResolver, cache: CompilerCache, templateResolver: TemplateResolver, componentUrlMapper: ComponentUrlMapper, urlResolver: UrlResolver, render: renderApi.RenderCompiler, protoViewFactory: ProtoViewFactory) { this._reader = reader; this._compilerCache = cache; this._compiling = new Map(); this._templateResolver = templateResolver; this._componentUrlMapper = componentUrlMapper; this._urlResolver = urlResolver; this._appUrl = urlResolver.resolve(null, './'); this._render = render; this._protoViewFactory = protoViewFactory; } private _bindDirective(directiveTypeOrBinding): DirectiveBinding { if (directiveTypeOrBinding instanceof DirectiveBinding) { return directiveTypeOrBinding; } else if (directiveTypeOrBinding instanceof Binding) { let annotation = this._reader.resolve(directiveTypeOrBinding.token); return DirectiveBinding.createFromBinding(directiveTypeOrBinding, annotation); } else { let annotation = this._reader.resolve(directiveTypeOrBinding); return DirectiveBinding.createFromType(directiveTypeOrBinding, annotation); } } // Create a hostView as if the compiler encountered <hostcmp></hostcmp>. // Used for bootstrapping. 
compileInHost(componentTypeOrBinding: Type | Binding): Promise<ProtoViewRef> { var componentBinding = this._bindDirective(componentTypeOrBinding); Compiler._assertTypeIsComponent(componentBinding); var directiveMetadata = componentBinding.metadata; var hostPvPromise; var component = <Type>componentBinding.key.token; var hostAppProtoView = this._compilerCache.getHost(component); if (isPresent(hostAppProtoView)) { hostPvPromise = PromiseWrapper.resolve(hostAppProtoView); } else { hostPvPromise = this._render.compileHost(directiveMetadata) .then((hostRenderPv) => { return this._compileNestedProtoViews(componentBinding, hostRenderPv, [componentBinding]); }); } return hostPvPromise.then((hostAppProtoView) => { return new ProtoViewRef(hostAppProtoView); }); } private _compile(componentBinding: DirectiveBinding): Promise<AppProtoView>| AppProtoView { var component = <Type>componentBinding.key.token; var protoView = this._compilerCache.get(component); if (isPresent(protoView)) { // The component has already been compiled into an AppProtoView, // returns a plain AppProtoView, not wrapped inside of a Promise. // Needed for recursive components. return protoView; } var pvPromise = this._compiling.get(component);<|fim▁hole|> if (isPresent(pvPromise)) { // The component is already being compiled, attach to the existing Promise // instead of re-compiling the component. // It happens when a template references a component multiple times. return pvPromise; } var template = this._templateResolver.resolve(component); var directives = this._flattenDirectives(template); for (var i = 0; i < directives.length; i++) { if (!Compiler._isValidDirective(directives[i])) { throw new BaseException( `Unexpected directive value '${stringify(directives[i])}' on the View of component '${stringify(component)}'`); } } var boundDirectives = ListWrapper.map(directives, (directive) => this._bindDirective(directive)); var renderTemplate = this._buildRenderTemplate(component, template, boundDirectives); pvPromise = this._render.compile(renderTemplate) .then((renderPv) => { return this._compileNestedProtoViews(componentBinding, renderPv, boundDirectives); }); this._compiling.set(component, pvPromise); return pvPromise; } private _compileNestedProtoViews(componentBinding, renderPv, directives): Promise<AppProtoView>| AppProtoView { var protoViews = this._protoViewFactory.createAppProtoViews(componentBinding, renderPv, directives); var protoView = protoViews[0]; if (isPresent(componentBinding)) { var component = componentBinding.key.token; if (renderPv.type === renderApi.ViewType.COMPONENT) { // Populate the cache before compiling the nested components, // so that components can reference themselves in their template. 
this._compilerCache.set(component, protoView); MapWrapper.delete(this._compiling, component); } else { this._compilerCache.setHost(component, protoView); } } var nestedPVPromises = []; ListWrapper.forEach(this._collectComponentElementBinders(protoViews), (elementBinder) => { var nestedComponent = elementBinder.componentDirective; var elementBinderDone = (nestedPv: AppProtoView) => { elementBinder.nestedProtoView = nestedPv; }; var nestedCall = this._compile(nestedComponent); if (isPromise(nestedCall)) { nestedPVPromises.push((<Promise<AppProtoView>>nestedCall).then(elementBinderDone)); } else { elementBinderDone(<AppProtoView>nestedCall); } }); if (nestedPVPromises.length > 0) { return PromiseWrapper.all(nestedPVPromises).then((_) => protoView); } else { return protoView; } } private _collectComponentElementBinders(protoViews: List<AppProtoView>): List<ElementBinder> { var componentElementBinders = []; ListWrapper.forEach(protoViews, (protoView) => { ListWrapper.forEach(protoView.elementBinders, (elementBinder) => { if (isPresent(elementBinder.componentDirective)) { componentElementBinders.push(elementBinder); } }); }); return componentElementBinders; } private _buildRenderTemplate(component, view, directives): renderApi.ViewDefinition { var componentUrl = this._urlResolver.resolve(this._appUrl, this._componentUrlMapper.getUrl(component)); var templateAbsUrl = null; var styleAbsUrls = null; if (isPresent(view.templateUrl)) { templateAbsUrl = this._urlResolver.resolve(componentUrl, view.templateUrl); } else if (isPresent(view.template)) { // Note: If we have an inline template, we also need to send // the url for the component to the render so that it // is able to resolve urls in stylesheets. templateAbsUrl = componentUrl; } if (isPresent(view.styleUrls)) { styleAbsUrls = ListWrapper.map(view.styleUrls, url => this._urlResolver.resolve(componentUrl, url)); } return new renderApi.ViewDefinition({ componentId: stringify(component), templateAbsUrl: templateAbsUrl, template: view.template, styleAbsUrls: styleAbsUrls, styles: view.styles, directives: ListWrapper.map(directives, directiveBinding => directiveBinding.metadata) }); } private _flattenDirectives(template: View): List<Type> { if (isBlank(template.directives)) return []; var directives = []; this._flattenList(template.directives, directives); return directives; } private _flattenList(tree: List<any>, out: List<Type | Binding | List<any>>): void { for (var i = 0; i < tree.length; i++) { var item = resolveForwardRef(tree[i]); if (isArray(item)) { this._flattenList(item, out); } else { out.push(item); } } } private static _isValidDirective(value: Type | Binding): boolean { return isPresent(value) && (value instanceof Type || value instanceof Binding); } private static _assertTypeIsComponent(directiveBinding: DirectiveBinding): void { if (directiveBinding.metadata.type !== renderApi.DirectiveMetadata.COMPONENT_TYPE) { throw new BaseException( `Could not load '${stringify(directiveBinding.key.token)}' because it is not a component.`); } } }<|fim▁end|>
<|file_name|>security_utils.py<|end_file_name|><|fim▁begin|>######### # Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved #<|fim▁hole|># # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from flask_security.utils import hash_password from cloudify.cluster_status import ( DB_STATUS_REPORTER, BROKER_STATUS_REPORTER, MANAGER_STATUS_REPORTER, MANAGER_STATUS_REPORTER_ID, BROKER_STATUS_REPORTER_ID, DB_STATUS_REPORTER_ID ) from manager_rest.storage.models import Tenant, UserTenantAssoc from manager_rest.storage import user_datastore from manager_rest.constants import ( DEFAULT_TENANT_ID, DEFAULT_TENANT_ROLE, ) ADMIN_ROLE = 'sys_admin' USER_ROLE = 'default' USER_IN_TENANT_ROLE = 'user' def get_admin_user(): return { 'username': 'admin', 'password': 'admin', 'role': ADMIN_ROLE } def get_status_reporters(): return [ { 'username': MANAGER_STATUS_REPORTER, 'password': 'password', 'role': MANAGER_STATUS_REPORTER, 'id': MANAGER_STATUS_REPORTER_ID }, { 'username': BROKER_STATUS_REPORTER, 'password': 'password', 'role': BROKER_STATUS_REPORTER, 'id': BROKER_STATUS_REPORTER_ID }, { 'username': DB_STATUS_REPORTER, 'password': 'password', 'role': DB_STATUS_REPORTER, 'id': DB_STATUS_REPORTER_ID }, ] def get_test_users(): test_users = [ { 'username': 'alice', 'password': 'alice_password', 'role': ADMIN_ROLE }, { 'username': 'bob', 'password': 'bob_password', 'role': USER_ROLE }, { 'username': 'clair', 'password': 'clair_password', 'role': USER_ROLE, 'active': False }, { 'username': 'dave', 'password': 'dave_password', 'role': USER_ROLE } ] return test_users def add_users_to_db(user_list): default_tenant = Tenant.query.get(DEFAULT_TENANT_ID) for user in user_list: role = user_datastore.find_role(user['role']) user_obj = user_datastore.create_user( username=user['username'], password=hash_password(user['password']), roles=[role] ) default_tenant_role = user_datastore.find_role(DEFAULT_TENANT_ROLE) user_obj.active = user.get('active', True) user_tenant_association = UserTenantAssoc( user=user_obj, tenant=default_tenant, role=default_tenant_role, ) user_obj.tenant_associations.append(user_tenant_association) user_datastore.commit()<|fim▁end|>
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at
<|file_name|>arg_visitor.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> use error::{Error, ResultE}; use super::osc_reader::OscReader; use super::osc_type::OscType; use super::maybe_skip_comma::MaybeSkipComma; #[derive(Debug)] pub struct ArgDeserializer<'a, R: Read + 'a> { data: Option<ArgVisitor<'a, R>>, } /// Deserializes the argument data of an OSC message. #[derive(Debug)] pub struct ArgVisitor<'a, R: Read + 'a> { read: &'a mut Take<R>, /// calling .next() on this returns the OSC char code of the next argument, /// e.g. 'i' for i32, 'f' for f32, etc. /// We store this as an iterator to avoid tracking the index of the current arg. arg_types : MaybeSkipComma<vec::IntoIter<u8>>, } impl<'a, R: Read + 'a> ArgDeserializer<'a, R> { pub fn new(read: &'a mut Take<R>) -> ResultE<Self> { Ok(Self { data: Some(ArgVisitor::new(read)?), }) } } impl<'de, 'a, R> de::Deserializer<'de> for &'a mut ArgDeserializer<'a, R> where R: Read + 'a { type Error = Error; fn deserialize_any<V>(self, visitor: V) -> ResultE<V::Value> where V: Visitor<'de> { match self.data.take() { Some(data) => visitor.visit_seq(data), // The arguments can only be deserialized once. None => Err(Error::BadFormat), } } // default serde impls don't equate units to empty sequences. fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error> where V: Visitor<'de> { match self.data.take() { Some(mut data) => { match data.parse_next()? { // We have no arguments; decoding a unit is ok! None => visitor.visit_unit(), // Cannot deserialize a unit from a non-empty sequence! Some(_) => Err(Error::BadFormat), } }, // The arguments can only be deserialized once. None => Err(Error::BadFormat), } } fn deserialize_unit_struct<V>( self, _name: &'static str, visitor: V ) -> Result<V::Value, Self::Error> where V: Visitor<'de> { self.deserialize_unit(visitor) } // This struct only deserializes sequences; ignore all type hints. // More info: https://github.com/serde-rs/serde/blob/b7d6c5d9f7b3085a4d40a446eeb95976d2337e07/serde/src/macros.rs#L106 forward_to_deserialize_any! { bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string option seq bytes byte_buf map newtype_struct tuple_struct struct identifier tuple enum ignored_any } } impl<'a, R> ArgVisitor<'a, R> where R: Read + 'a { pub fn new(read: &'a mut Take<R>) -> ResultE<Self> { let arg_types = read.read_0term_bytes().map(|bytes| MaybeSkipComma::new(bytes.into_iter()))?; Ok(ArgVisitor { read, arg_types, }) } fn parse_next(&mut self) -> ResultE<Option<OscType>> { match self.arg_types.next() { None => Ok(None), Some(tag) => self.parse_arg(tag).map(|arg| Some(arg)), } } fn parse_arg(&mut self, typecode: u8) -> ResultE<OscType> { match typecode { b'i' => self.read.parse_i32().map(|i| { OscType::I32(i) }), b'f' => self.read.parse_f32().map(|f| { OscType::F32(f) }), b's' => self.read.parse_str().map(|s| { OscType::String(s) }), b'b' => self.read.parse_blob().map(|b| { OscType::Blob(b) }), _ => Err(Error::UnsupportedType), } } } impl<'de, 'a, R> SeqAccess<'de> for ArgVisitor<'a, R> where R: Read + 'a { type Error = Error; fn next_element_seed<T>(&mut self, seed: T) -> ResultE<Option<T::Value>> where T: DeserializeSeed<'de> { // Return None when the message has been fully parsed, // else call seed.deserialize to deserialize the next item. let value = self.parse_next()?; match value { // end of sequence None => Ok(None), Some(osc_arg) => seed.deserialize(osc_arg).map(Some), } } }<|fim▁end|>
use std::io::{Read, Take}; use std::vec; use serde::de; use serde::de::{DeserializeSeed, SeqAccess, Visitor};
<|file_name|>machine.js<|end_file_name|><|fim▁begin|>// Abacus Machine Simulator // Gaurav Manek // // machine.js compiles the parser output to an intermediate form and simulates the machine. "use strict"; // The compiler transforms the output of the parser to an intermediate form that the machine can use. // Encode integer values as negative numbers to allow JavaScript engines in browsers to optimize this away. function ENCODE_INTEGER(v) { return -v-1; } function DECODE_INTEGER(v) { return -v-1; } var DEFAULT_MAX_ITER = 10000; var MACHINE_CONSTANTS = (function () { var i = 0; return { CODE_TYPE_CALL : i++, CODE_TYPE_REGISTER : i++, CODE_TYPE_GOTO : i++, CODE_TYPE_RETURN : i++, CODE_TYPE_VALUES : i++, EXEC_RUNNING : i++, EXEC_HALTED : i++, EXEC_WAITING : i++, TEST_EQUALITY : i++, DBG_STEP_OVER : i++, DBG_STEP_INTO : i++, DBG_STEP_OUT : i++, DBG_RUN_TO_END : i++, STOP_NORMAL : i++, STOP_HALTED : i++, STOP_BREAKPOINT : i++, RUN_NORMAL : i++, RUN_RETURN : i++, RUN_ENTER : i++ }; })(); var Compiler = (function() { "use strict"; function CompilerException(text, location) { this.message = text; this.location = location; }; function abacm$except(text, location) { throw new CompilerException(text, location); } // Takes a function specification, spits out the version in an intermediate form that the machine // can directly use. function abacm$compile(fn, opts) { // Compiled form var rv = { frst: 0, // The first instruction in the function. name: fn.name, args: [], // The register to put the args into. rets: [], // The registers to return. deps: [], // Dependencies regs: [], // Registers exec: [], // Code to execute opts: opts, lineno: fn.lineno } var anchors = {}; // Anchor positions var jumpsToRewrite = []; // Takes an object that represents a function argument, // If it is a register, adds it to rv.regs. If not, it // (depending on enforceRegisters) throws an exception // or encodes it as a number. function addRegister(robj, enforceRegisters, obj) { if (robj.type == "register") { var id = "" + robj.id; var idx = rv.regs.indexOf(id); if(idx < 0) { idx = rv.regs.length; rv.regs.push(id); } return idx; } else if (enforceRegisters) { abacm$except("Register expected, but number found.", obj.lineno); } else if (robj.type == "integer") { return ENCODE_INTEGER(robj.val); } } // Get the exec number corresponding to an anchor, throwing an exception // if the anchor cannot be found. If the anchor is relative, this computes // the new line number and returns it. // The arguments are: // anc: anchor object // i: index of current line // jR: instruction at line function getAnchor(anc, i, jR) { if(!anc || anc.type == "rel") { if (!anc || anc.val == "next") { return (i+1); } else { abacm$except("Unrecognized relative anchor.", jR.lineno); } } else if(anc.type == "anchor") { if(anchors.hasOwnProperty(anc.val)) { return anchors[anc.val]; } else { abacm$except("Jumping to unrecognized anchor.", jR.lineno); } } else { abacm$except("Unrecognized anchor type.", jR.lineno); } } // Step 1: go through the arguments and return values, put them in registers; rv.args = fn.args.val.map(function(r){ return addRegister(r, true, fn); }); rv.rets = fn.return.val.map(function(r){ return addRegister(r, true, fn); }); // Step 2: go through the code and: // (a) convert registers to new registers, // (b) put all anchor positions in anchors, and // (c) log dependencies. 
for (var i = 0; i < fn.body.length; i++) { // Process fn.body[i] // (b) if(fn.body[i].anchor) { if(anchors.hasOwnProperty(fn.body[i].anchor.val)) { abacm$except("Multiple definition of anchor \":"+fn.body[i].anchor.val + "\".", fn.body[i].lineno); } anchors[fn.body[i].anchor.val] = rv.exec.length // Make it point to the next instruction. } // Check to see if there is any instruction here. if(fn.body[i].exec) { var e = fn.body[i].exec; if (e.type == "callandstore") { rv.exec.push({ type: MACHINE_CONSTANTS.CODE_TYPE_CALL, // The input to the function call, as registers or integers in: e.fn.args.val.map(function(r) { return addRegister(r, false, fn.body[i]); }), // The output from the function call, only registers. out:e.store.val.map(function(r) { return addRegister(r, true, fn.body[i]); }), fn: e.fn.name, lineno: fn.body[i].lineno }); var depFind = rv.deps.find(function(n){ return n.name == e.fn.name}); if (depFind) { // Check if the signature is as expected. if(depFind.in != e.fn.args.val.length || depFind.out != e.store.val.length) { abacm$except("Conflicting function signature for dependency \""+ e.fn.name + "\".", fn.body[i].lineno); } } else { rv.deps.push({name: e.fn.name, in: e.fn.args.val.length, out: e.store.val.length }); } } else if (e.type == "rchange") { var ep = { type: MACHINE_CONSTANTS.CODE_TYPE_REGISTER, // Register register: addRegister(e.register, true, fn.body[i]), // Operation increment: (e.operation=="+"), lineno: fn.body[i].lineno }; if (ep.increment) { ep.next = e.next; } else { ep.next_pos = e.npos; ep.next_zero = e.nzero; } rv.exec.push(ep); } else { abacm$except("Unknown instruction type "+e.type + "\".", fn.body[i].lineno); } } else if (fn.body[i].type == "goto") { var ep = { type: MACHINE_CONSTANTS.CODE_TYPE_GOTO, next: fn.body[i].to, lineno: fn.body[i].lineno }; rv.exec.push(ep); } } // Push the return instruction to the end. rv.exec.push({type: MACHINE_CONSTANTS.CODE_TYPE_RETURN, lineno: fn.lineno}); // Next, use the information in anchors to rewrite all the jumps. for (var i = 0; i < rv.exec.length; i++) { var jR = rv.exec[i]; if(jR.type == MACHINE_CONSTANTS.CODE_TYPE_GOTO || jR.type == MACHINE_CONSTANTS.CODE_TYPE_CALL) { jR.next = getAnchor(jR.next, i, jR); } else if (jR.type == MACHINE_CONSTANTS.CODE_TYPE_REGISTER) { if (jR.increment) { jR.next = getAnchor(jR.next, i, jR); } else { jR.next_pos = getAnchor(jR.next_pos, i, jR); jR.next_zero = getAnchor(jR.next_zero, i, jR); } } } return rv; } function abacm$compileTests(fn) { // Tests var tests = []; // Takes an object that represents a list of arguments, // all of which must be numbers, not registers. function mapValues(rlist, obj) { var rv = rlist.map(function (v) { if(v.type != "integer") abacm$except("Number expected, but register found.", obj.lineno); return v.val; }); return rv; } function testFunction(l, t) { return { type: MACHINE_CONSTANTS.CODE_TYPE_CALL, fcall: FunctionCall(l.name, mapValues(l.args.val, t), 0), lineno: t.lineno }; } function testValues(va, t) { return { type: MACHINE_CONSTANTS.CODE_TYPE_VALUES, values: mapValues(va.val, t), lineno: t.lineno }; } // For each test, store it in the new format, with // the function call in lhs, and the comparing function call // or list of values in rhs. // We enforce the "only numbers no registers" rule here. 
for(var i = 0; i < fn.tests.length; ++i) { var l = fn.tests[i].lhs; if(l.type != "functioncall") { abacm$except("Expected a function call on the left-hand side.", fn.tests[i].lineno); } var lhs = testFunction(l, fn.tests[i]); var r = fn.tests[i].rhs; var rhs; if(r.type == "functioncall") { rhs = testFunction(r, fn.tests[i]); } else if (r.type == "arglist"){ rhs = testValues(r, fn.tests[i]); } tests.push({ lhs: lhs, rhs: rhs, mode: MACHINE_CONSTANTS.TEST_EQUALITY, lineno: fn.tests[i].lineno }); } return tests; } function abacm$resolveGotos(fn, opts) { // Change all pointers to gotos to point to the goto target (i.e. "through" the goto.) // If an infinite loop is detected, this will throw an exception. // This does not remove the gotos themselves, that happens in a later function. fn.opts.resolveGotos = true; function resolve(i, trace) { if(!trace) trace = RepetitionDetector(); if(trace.push(i)) abacm$except("Infinite GOTO loop detected in function " + fn.name + ".", fn.exec[i].lineno); // Now we look at the instruction at i if(fn.exec[i].type == MACHINE_CONSTANTS.CODE_TYPE_GOTO) { // Okay, we have yet to find the ultimate target of // the goto chain. We return the result of resolving // the next goto along the chain, and replace the // current goto's next parameter to reduce future // computation cost. var tgt = resolve(fn.exec[i].next, trace); fn.exec[i].next = tgt; return tgt; } else { // Oh, good, we found a non-goto instruction. // We return this index as the resolved index: return i; } } fn.frst = resolve(fn.frst); abacm$mapnexts(fn, resolve); return fn; } function abacm$mapnexts(fn, mapper) { for (var i = 0; i < fn.exec.length; i++) { if(fn.exec[i].type != MACHINE_CONSTANTS.CODE_TYPE_RETURN) { if(fn.exec[i].hasOwnProperty("next")) { fn.exec[i].next = mapper(fn.exec[i].next); } else { fn.exec[i].next_pos = mapper(fn.exec[i].next_pos); fn.exec[i].next_zero = mapper(fn.exec[i].next_zero); } } } } function abacm$prune(fn, opts) { // We prune all lines that are not reachable, in any case, // from the input. // Eventually, we may support the pruning of registers // that are only present on unreachable lines. fn.opts.prune = true; var reach = fn.exec.map((v, i) => false); var stack = [fn.frst]; while(stack.length > 0) { var i = stack.pop(); // If I has yet to be marked as reachable: if(!reach[i]) { reach[i] = true; // If it's not a return instruction, add its nexts to // the stack. if(fn.exec[i].type != MACHINE_CONSTANTS.CODE_TYPE_RETURN) { if(fn.exec[i].hasOwnProperty("next")) { stack.push(fn.exec[i].next); } else { stack.push(fn.exec[i].next_pos); stack.push(fn.exec[i].next_zero); } } } } // If the return instruction cannot be reached, then we throw an exception. // This should be considered a fatal error. if(!reach[reach.length - 1]) { abacm$except("This function never exits.", fn.lineno); } // Now we use the reachability list to make a list of destination // indices for each element. var indices = []; for (var i = 0, j = 0; i < reach.length; ++i) { indices[i] = j; // If i is reachable, then the next reachable // index must be assigned the next available // number. if(reach[i]) j++; } // Now we actually rewrite the actual targets of each jump. abacm$mapnexts(fn, (i) => indices[i]); // Copy and filter. var execs = fn.exec; fn.exec = execs.filter((v, i) => reach[i]); var unr = execs.map((f) => f.lineno).filter((v, i) => !reach[i]); // There's no need to filter out the return instruction here because // if it is inaccessible, that makes this a fatal error. 
return { code: fn, unreachable: unr }; } function abacm$compilerManager(fn, opts) { if(!opts) opts = {}; // Perform basic compilation: var rv = { code: abacm$compile(fn, opts), tests: abacm$compileTests(fn, opts) }; if(opts.resolveGotos) { rv.code = abacm$resolveGotos(rv.code, opts); if(!opts.hasOwnProperty("prune")) opts.prune = true; } if(opts.prune) { var tmp = abacm$prune(rv.code, opts); rv.code = tmp.code; rv.unreachable = tmp.unreachable; } return rv; } return { resolveGotos: abacm$resolveGotos, prune: abacm$prune, compile: abacm$compilerManager, CompilerException: CompilerException }; })(); // A simple loop-detecting object, used to check for infinite GOTO loops // and mutual recursion. function RepetitionDetector() { "use strict"; var loop = []; function repdec$push(id) { if(loop.indexOf(id) >= 0) { return true; } loop.push(id); return false; } function repdec$getLoop(id) { var startpos = loop.indexOf(id); if(startpos < 0) { return []; } // Note use of slice, not s_p_lice, here: return [1, 2,3];//loop.slice(startpos, loop.length).concat([id]); } function repdec$pop() { var cmpid = loop.pop(); } function repdec$reset() { loop = []; } return { // Returns true if a repetition is found, // false otherwise. Once it returns true, // further pushes are disallowed. push : repdec$push, // Remove the last element in the checker. pop : repdec$pop, // Reset the repetition checker. reset: repdec$reset, // Get the loop state getLoop: repdec$getLoop }; } // Object representing a function call // _fn is the function name // _in is the argument list // _out is either zero to unrestrict the return tuple size, or // positive with the expected length of returned tuple. function FunctionCall(_fn, _in, _out) { "use strict"; // Returns true if the function name and signature match, // throws an exception if the name matches but signatures do not, // returns false otherwise. function fncall$match(other) { if(_fn != other.name) return false; if(_in.length != other.args.length || (_out > 0 && _out != other.rets.length)) throw new MachineException("Function \"" + _fn + "\" with correct name but incorrect signature found"); return true; } function fncall$tostr() { return _fn + "(" + _in.join(", ") + ")"; } return { fn: _fn, in: _in, out: _out, matches: fncall$match, toString: fncall$tostr }; } function MachineException(text, location) { this.message = text; this.location = location; }; // A Machine object represents the result of running a single function. It relies // on a MachineRunner object to handle recursion and the stack. // compiled: The code object // options: configuration options for the machine // - exceptionOnNegativeRegister : Throw an exception if a 0-valued register is decremented. function Machine(compiled, args, options) { "use strict"; function abacm$except(text, location) { throw new MachineException(text, location); } var code = compiled; var opts = options?options:{}; var registers = []; var curr = 0; var state = MACHINE_CONSTANTS.EXEC_RUNNING; var loopDetector = RepetitionDetector(); var stepCount = 0; function abacm$init(compiled, args) { // Initialize the registers for(var i = 0; i < code.regs.length; ++i) { registers.push(0); } // Check the argument array if(code.args.length != args.length) abacm$except("Incorrect number of arguments to function.", code.lineno); // Copy the arguments into the registers. 
        for(var i = 0; i < code.args.length; ++i) {
            if(args[i] < 0) {
                abacm$except("Negative argument to function.", code.lineno);
            }
            registers[code.args[i]] = args[i];
        }
    }

    // Advances the state of the machine by one step, accepts parameters:
    //   returns: values returned by the recursive function call, if one is expected; null otherwise.
    function abacm$next(returns) {
        var rv = {};              // Return value
        var cL = code.exec[curr]; // The line to evaluate at this step.

        // Check the current state and evolve the machine:
        if (state == MACHINE_CONSTANTS.EXEC_HALTED) {
            abacm$except("Attempting to run halted machine.", cL.lineno);
        } else if (state == MACHINE_CONSTANTS.EXEC_WAITING) {
            // We've been invoked after sending a request to call a function.
            // Make sure that we have the desired values.
            if(cL.type != MACHINE_CONSTANTS.CODE_TYPE_CALL)
                abacm$except("Internal error, in EXEC_WAITING state but not at a function.", cL.lineno);
            if(!returns)
                abacm$except("Expected return values from function call.", cL.lineno);
            if(returns.length != cL.out.length)
                abacm$except("Expected " + cL.out.length + " return values from function call, "
                    + returns.length + " received.", cL.lineno);

            // Now we copy the returned values to the appropriate registers
            for(var i = 0; i < returns.length; ++i) {
                if(returns[i] < 0)
                    abacm$except("Negative value returned by " + cL.fn + ", return values: "
                        + returns.join(", ") + ".", cL.lineno);
                registers[cL.out[i]] = returns[i];
            }

            // Excellent, we're done now! We advance `curr` to the next state and change `state`:
            curr = cL.next;
            state = MACHINE_CONSTANTS.EXEC_RUNNING;
        } else if (state == MACHINE_CONSTANTS.EXEC_RUNNING) {
            stepCount++;
            // We're expecting a null value for returns, so we enforce that.
            if(returns)
                abacm$except("Expected no return values.", cL.lineno);

            // Use the loopDetector to check if we have visited this state before,
            // without going through a branching jump.
            // Since the only branching jump is with register subtractions, we reset
            // loopDetector there.
            if(loopDetector.push(curr))
                abacm$except("Infinite loop detected in code, see lines "
                    + loopDetector.getLoop(curr).join(", ") + ".", cL.lineno);

            // We look at the current state and figure out what to do next based on this.
            if (cL.type == MACHINE_CONSTANTS.CODE_TYPE_CALL) {
                // Oh, goody, we need to call a function.
                var fnArgs = [];
                // Populate fncall.in with the values of the various arguments
                for(var i=0; i<cL.in.length; i++) {
                    if(cL.in[i] < 0) {
                        // If this is a value argument, decode it.
                        fnArgs.push(DECODE_INTEGER(cL.in[i]));
                    } else {
                        // If this is a register argument, copy the value.
                        fnArgs.push(registers[cL.in[i]]);
                    }
                }
                // Put this in the return value.
                rv.functioncall = FunctionCall(cL.fn, fnArgs, cL.out.length);
                // Change the state to WAITING
                state = MACHINE_CONSTANTS.EXEC_WAITING;
                // We don't change the pointer curr yet, that happens
                // upon function return.
            } else if (cL.type == MACHINE_CONSTANTS.CODE_TYPE_GOTO) {
                curr = cL.next; // Go to the given line.
} else if (cL.type == MACHINE_CONSTANTS.CODE_TYPE_REGISTER) { // Check if need to increment or decrement: if(cL.increment) { // Increment registers[cL.register]++; curr = cL.next; } else { // Decrement if(opts.exceptionOnNegativeRegister && registers[cL.register] == 0) abacm$except("Decrementing the zero-valued register [" + cL.register + "]", cL.lineno); // Branch depending on the value of the register if (registers[cL.register] == 0) { curr = cL.next_zero; } else { curr = cL.next_pos; } // Decrement the register if positive if (registers[cL.register] > 0) registers[cL.register]--; // Reset the infinite loop detection, because we've found a branching instruction: loopDetector.reset(); } } else if (cL.type == MACHINE_CONSTANTS.CODE_TYPE_RETURN) { // Oh, goody! We're done with this function. We return values in // rv.retval; rv.retval = []; for(var i = 0; i < code.rets.length; ++i) rv.retval.push(registers[code.rets[i]]); // And we change the state to HALTED state = MACHINE_CONSTANTS.EXEC_HALTED; } else if (cL.type == MACHINE_CONSTANTS.CODE_TYPE_VALUES) { // Wait, what? How did this ever make it all the way to the machine? abacm$except("Unexpected line type: values.", cL.lineno); } else { abacm$except("Unexpected line type.", cL.lineno); } } // Incorporate the state into the return value. rv.state = state; rv.lineno = code.exec[curr].lineno; return rv; } function abacm$set(adj) { if(!adj) return; adj.forEach(function (v){ var z = code.regs.indexOf(v.reg); if (z < 0) { abacm$except("Trying to set value of unknown register [" + v.val + "]."); } if(v.val < 0) { abacm$except("Trying to set register [" + v.val + "] to illegal value " + v.val + "."); } registers[z] = v.val; }); } function abacm$state() { // Output the current state in a nice manner, easy for the visualization system to use. return { lineno: code.exec[curr].lineno, registers: code.regs, values: registers, state: state, name: code.name + "(" + args.join(", ") + ");", steps: stepCount } } abacm$init(compiled, args); return { step: abacm$next, getState: abacm$state, set: abacm$set }; } // Handles the stack, spins up a Machine object for each level // allfn: an array of all compiled functions, // fcall: a FunctionCall object representing the function call to make. function MachineRunner(_allfn, _fcall, _options) { "use strict"; var step = 0; var funcs = _allfn; var opts = _options; var stack = []; var state = MACHINE_CONSTANTS.EXEC_RUNNING; var recursionDetector = RepetitionDetector(); var retval = null; var startingLineNo = -1; function mrun$except(text, location) { throw new MachineException(text, location); } // Start a new function call, deepening the stack. function mrun$invokefunc(fcall) { // Check for recursion if(recursionDetector.push(fcall.fn)) mrun$except("Attempted recursion.", fcall.lineno); var f = funcs.find(fcall.matches); if(!f) mrun$except("Cannot find function \"" + fcall.fn + "\"."); // Since fcall.matches checks the function signature, we can use it without // further checking. var m = Machine(f, fcall.in, opts); stack.push(m); } function mrun$returnfunc() { recursionDetector.pop(); stack.pop(); if(stack.length == 0) { state = MACHINE_CONSTANTS.EXEC_HALTED; } } // Initializer, simply invokes the function. function mrun$init(allfn, fcall) { mrun$invokefunc(fcall); startingLineNo = mrun$getlineno(); } function mrun$next() { // Get the machine corresponding to the innermost state var m = stack[stack.length - 1]; // Advance it by one step, including the previous return value if one is set. 
var s = m.step(retval); retval = null; // Reset retval, if not already done. var rv = { lastAction: MACHINE_CONSTANTS.RUN_NORMAL }; if(s.state == MACHINE_CONSTANTS.EXEC_RUNNING) { // Do nothing, the machine is still running. } else if(s.state == MACHINE_CONSTANTS.EXEC_WAITING) { // s.functioncall contains the function call that m needs to continue. if(!s.functioncall) mrun$except("Machine WAITING without a pending function call.", s.lineno); rv.lastAction = MACHINE_CONSTANTS.RUN_ENTER; // Invoke the recursive function. mrun$invokefunc(s.functioncall); } else if(s.state == MACHINE_CONSTANTS.EXEC_HALTED) { // s.retval contains the returned value. if(!s.retval) mrun$except("Machine HALTED without a return value.", s.lineno); // Store the return value in retval for the next invocation. retval = s.retval; rv.lastAction = MACHINE_CONSTANTS.RUN_RETURN; // Return the function. mrun$returnfunc(); } step++; return rv; } // Returns a state descriptor, used to render the view of // the inner workings of the machine. function mrun$state(i) { if(typeof i != "undefined") { return stack[i].getState(); } return stack.map(st => st.getState()); } // Set a value in the innermost scope. function mrun$set(v) { stack[stack.length - 1].set(v); } function mrun$getlineno() { if(stack.length > 0) { return stack[stack.length - 1].getState().lineno; } else { return startingLineNo; } } // Run the machine until one of the termination conditions are met. // In the worst case, it stops at DEFAULT_MAX_ITER. function mrun$runner(options) { // options, contains: // lines: Breakpoints corresponding to line numbers, // registers: [not done yet] breakpoints corresponding to change in a particular register // stepmode: MACHINE_CONSTANTS.{DBG_STEP_OVER, DBG_STEP_INTO, DBG_STEP_OUT, DBG_RUN_TO_END} // Defaults to DBG_RUN_TO_END. // max_iter: The maximum number of iterations to run before pausing. Defaults to DEFAULT_MAX_ITER. // Make sure that this works. if(state == MACHINE_CONSTANTS.EXEC_HALTED) return { state: state, steps: step }; // Defaults: if(!options) options = {}; if(!options.max_iter) options.max_iter = DEFAULT_MAX_ITER; if(!options.stepmode) options.stepmode = MACHINE_CONSTANTS.DBG_RUN_TO_END; // Starting stack length var startStackLength = stack.length; // Ending iteration var endC = step + options.max_iter; var stopCause = MACHINE_CONSTANTS.STOP_NORMAL; while(step < endC) { var toBreak = false; var st = mrun$next(); // If the machine has halted, stop. if(state == MACHINE_CONSTANTS.EXEC_HALTED) { stopCause = MACHINE_CONSTANTS.STOP_HALTED; break; } switch(options.stepmode) { case MACHINE_CONSTANTS.DBG_STEP_INTO: // Always break. toBreak = true; break; case MACHINE_CONSTANTS.DBG_STEP_OVER: // If there is no stack length change, then we can end right here. // Otherwise, we continue until the stack returns to this length. toBreak = (stack.length <= startStackLength); break; case MACHINE_CONSTANTS.DBG_STEP_OUT: toBreak = (stack.length < startStackLength); break; case MACHINE_CONSTANTS.DBG_RUN_TO_END: default: // Do nothing, just keep going. } // Check for line number breakpoints: if(options.lines && stack.length > 0) { var cs = stack[stack.length - 1].getState(); if(options.lines.indexOf(cs.lineno) >= 0 && (st.lastAction != MACHINE_CONSTANTS.RUN_RETURN)){ toBreak = true; stopCause = MACHINE_CONSTANTS.STOP_BREAKPOINT; } } if(toBreak) break; } var rv = { state: state, steps: step, lineno: mrun$getlineno(), stop: stopCause }; // If the machine has halted, stop. 
if(state == MACHINE_CONSTANTS.EXEC_HALTED) { if(!retval) { mrun$except("Machine HALTED without a return value.", s.lineno); } rv.retval = retval; } return rv; } mrun$init(_allfn, _fcall); return { fcall: _fcall, run: mrun$runner, getState: mrun$state, set: mrun$set, lineno: mrun$getlineno }; } // Uses a topological sort to order tests, so that the // function that is at the very tail of the dependency // DAG is tested first. // // The _listener is called each time a test succeeds, and // notifies the UI each time a test passes or fails, and // when all tests associated with a function pass. function TestEngine(__compiledOutput, _listener) { "use strict" var tests = []; var testFunc = []; var lastInFunction = null; var ct = 0; var cp = 0; var passedAllTests = true; var prevTest = null; var listener = _listener; function tests$init(_compiledOutput) { var fn = []; var deps = []; _compiledOutput.forEach(function(v) { fn.push(v.code.name); deps.push(v.code.deps.map(function(z) { return z.name; })); }); <|fim▁hole|> while(fn.length > 0) { // Strip all functions that are not in the pending list. deps = deps.map(function (v) { return v.filter(function(z) { return fn.indexOf(z) >= 0; }); }); // There should be at least one function that has 0 dependencies // at each step, otherwise there is a cycle somewhere. var empties = deps.reduce(function(arr, v, idx) { return (v.length == 0)?arr.concat(idx):arr; }, []); if(empties.length == 0) throw new MachineException("Circular dependency detected when preparing tests."); // Prepend the functions to the list, maintaining the topological order. testFunc = empties.map(function(v) { return fn[v]; }).concat(testFunc); // Remove all corresponding elements from fn and deps. var emptyRemoveFunction = function (v, idx) { return empties.indexOf(idx) < 0; }; fn = fn.filter(emptyRemoveFunction); deps = deps.filter(emptyRemoveFunction); } // Now all functions are in testFunc, topologically sorted. tests = testFunc.map(function(fn) { return _compiledOutput.find(function(v) { return v.code.name == fn; }).tests; }); tests$removeTrailingEmpties(); // Count up the tests ct = tests.reduce(function(f,v) { return v.length + f; }, 0); } function tests$hasTest() { return (tests.length > 0); } function tests$removeTrailingEmpties(){ while(tests.length > 0 && tests[tests.length - 1].length == 0){ tests.pop(); } } function tests$nextTest() { var tt = tests[tests.length - 1]; prevTest = tt.pop(); if(tt.length == 0) { tests.pop(); lastInFunction = testFunc.pop(); } else { lastInFunction = null; } tests$removeTrailingEmpties(); return prevTest; } // Return true if we should continue, false otherwise. function tests$status(succ) { if(succ) cp++; passedAllTests = succ && passedAllTests; if(listener) { listener(prevTest, succ, lastInFunction); } return true; } function tests$passed() { return cp; } tests$init(__compiledOutput); return { hasTest: tests$hasTest, next: tests$nextTest, status: tests$status, count: ct, passed: tests$passed } } var Linker = (function(){ "use strict" var REGISTER_TMP_COPY = 0; function LinkerException(message, lineno){ this.message = message; this.lineno = lineno; } function lnkr$except(text, location){ throw new LinkerException(text, location); } function lnkr$find(allfunc, target){ var rf = allfunc.find((t) => t.name == target); if(!rf) { lnkr$except("Cannot find function \"" + target + "\""); } return rf; } // Copy from global register rF to global register rT, // assumes rT is zero. 
function lnkr$makePreambleCopy(rF, rT, first){ var r0 = REGISTER_TMP_COPY; var rv = [ { "type":1, "register":rF, "increment":false, "next_pos":1, "next_zero":6 }, { "type":1, "register":rT, "increment":true, "next":2 }, { "type":1, "register":r0, "increment":true, "next":3 }, { "type":1, "register":rF, "increment":false, "next_pos":1, "next_zero":4 }, { "type":1, "register":r0, "increment":false, "next_pos":5, "next_zero":6 }, { "type":1, "register":rF, "increment":true, "next":4 }]; return lnkr$makePreambleWrapper(rv, first); } // Zeros rZ. function lnkr$makePreambleZero(rZ, first){ var rv = [ { "type":1, "register":rZ, "increment":false, "next_pos":0, "next_zero":1 } ]; return lnkr$makePreambleWrapper(rv, first); } // Sets rZ to value v. function lnkr$makePreambleValue(rZ, v, first){ var rv = []; var i = 0; while(i < v) rv.push({ "type":1, "register":rZ, "increment":true, "next":++i }); return lnkr$makePreambleWrapper(rv, first); } function lnkr$makePreambleWrapper(l, first) { var idx = l.map((v, i) => lnkr$lazyIndex(-1, i + 1)); idx.unshift(first); // Temporarily add the exit pointer to idx // Switch all nexts from numbers to lazy indices. for(var i = 0; i < l.length; ++i) { if(l[i].hasOwnProperty("next")) { l[i].next = idx[l[i].next]; } else { l[i].next_pos = idx[l[i].next_pos]; l[i].next_zero = idx[l[i].next_zero]; } } var next = idx.pop(); // Sanity checks if(l.length != idx.length) lnkr$except("Wrapper broke invariant that exec and jump are of same length."); if(!next) lnkr$except("Wrapper returning null lazyIndex."); return { exec: l, jump: idx, next: next }; } function lnkr$makeGoto(dest) { return { type: MACHINE_CONSTANTS.CODE_TYPE_GOTO, next: dest }; } function lnkr$deepCopyCode(obj) { var temp = {}; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { if(Array.isArray(obj[key])) { temp[key] = obj[key].slice(0); } else { temp[key] = obj[key]; } } } return temp; } // Lazily evaluated index object. Used to // stand in for an actual index until the flattening is done. function lnkr$lazyIndex(sc, i) { return { sc: sc, i: i, pos: -1 }; } // Convert an entire execution tree into a single function. // allfunc: an array of functions, // target: the name of the function to compile, and // opts: extra options. // // Note that we do not perform any cleaning operations here, so it is important to // run `goto` resolution and pruning after this. function lnkr$link(allfunc, target, opts) { // Oh, good. Now we have the starting function. Now we prepare scope resolution. A "scope" corresponds // to the position of a function call in the final, linked, function and the registers assigned to it. var nextScope = 0; // The next scope id. var startingRegister = []; // The index of the first register of the original function in a scope. var registerMapping = []; // Map from (scope, local register) to global register. var regs = []; startingRegister.push(1); // We need a temporary copying register somewhere here. registerMapping.push(0); // Convenience functions: var getReg = (scope, i) => registerMapping[startingRegister[scope] + i]; var setReg = (scope, i, k) => registerMapping[startingRegister[scope] + i] = k; // Function Calling Convention: // Each function has a preamble that copies data from an input register in the caller's scope to // the appropriate register in the callee's scope, and zeros out registers in the caller's scope // that are to be written to. If a register is both in the caller's and callee's scope, then we // leave it alone. 
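    // Worked example (illustrative only, not from the original source): suppose the linker
    // reaches a call site  callee(r, 7) -> (s), where the caller's r and s have already been
    // mapped to global registers 5 and 6, and 7 is an integer constant (stored in its encoded,
    // negative form). Under the rules implemented below, the emitted preamble would:
    //   (1) remap the callee's return register onto global register 6 and, since 6 is not
    //       also an input register, zero it with lnkr$makePreambleZero;
    //   (2) copy the value of global register 5 into the callee's first argument register
    //       with lnkr$makePreambleCopy (pass-by-value);
    //   (3) write the constant 7 directly into the callee's second argument register with
    //       lnkr$makePreambleValue.
    // The register numbers 5 and 6 are made up for this sketch; the real numbers come from
    // startingRegister/registerMapping as computed in flattenFunctionCode below.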
function flattenFunctionCode(fname, next, input_registers, output_registers, return_target) { var scope = nextScope++; // Get the next scope id for this function. var fn = lnkr$find(allfunc, fname); // Find the compiled function data. var pend = fn.exec.map(lnkr$deepCopyCode); // Commands to be processed, deep copied. var idxs = fn.exec.map((v, j) => lnkr$lazyIndex(scope, j)); // Prepare the lazy indexing object array. var exec = []; // The final sequence of exec[] commands associated with this. var jump = []; // The lazyIndex object associated with each line. // Allocate all the registers we need in this scope, and tell the next scope // where it can start allocating registers. fn.regs.forEach(function (v, j) { registerMapping.push(startingRegister[scope] + j); regs.push(scope + "_" + fn.name + "_" + fn.regs[j]); }); startingRegister.push(startingRegister[scope] + fn.regs.length); // next is the entrypoint into the function. If it is not given, then this must be the // root function call. In that case, we use the first instruction's index as the entrypoint. if(!next) { next = idxs[fn.frst]; } else { // This is not the root function call. We need to massage the registers a little. if(!Array.isArray(input_registers) || !Array.isArray(output_registers)) lnkr$except("Non-root function call without preamble data.", fn.lineno); if(input_registers.length != fn.args.length || output_registers.length != fn.rets.length) lnkr$except("Incorrect input or output register length.", fn.lineno); // Function preamble // Here's the tricky part: we need to map input_registers to fn.args and // fn.rets to output_registers. // Here are the rules for that mapping: // (1) Remap and erase output registers. // (a) We set registerMapping[(scope, fn.rets[j])] = output_registers[j] for all // output registers. (This corresponds to setting the output location.) // (b) All output_registers that ARE NOT also input_registers are zeroed // using lnkr$makePreambleZero. (Zero returning registers.) // (2) Copy various input registers. // (a) If an input_register is also an output_register, we ignore it. It has been // dealt with in (1)(a). // (b) If an input_register is non-negative, we use lnkr$makePreambleCopy to copy // the value from input_register[j] to rn.args[j]. (Pass-by-value support.) // (c) If an input_register is negative, we use lnkr$makePreambleValue to store // the corresponding value in the corresponding register in fn.args. (Integer // constants support.) // // The result of all this preamble code will be to emulate the function abstractions of // passing by value to and from a function. // // Also, next is the index to which control will be passed. We use that as the first index of // each part of the preamble and overwrite it with the returned rv.next index. // // Don't worry, this will not be on the final. 
:) output_registers.forEach(function (r, j) { // (a) setReg(scope, fn.rets[j], r); // (b) if(input_registers.indexOf(r) < 0) { var c = lnkr$makePreambleZero(r, next); exec = exec.concat(c.exec); jump = jump.concat(c.jump); next = c.next; } }); // (2) input_registers.forEach(function (r, j) { if(output_registers.indexOf(r) >= 0) { // (a) } else { var c; if (r >= 0) { // (b) c = lnkr$makePreambleCopy(r, getReg(scope, fn.args[j]), next); } else { // (c) c = lnkr$makePreambleValue(getReg(scope, fn.args[j]), DECODE_INTEGER(r), next); } exec = exec.concat(c.exec); jump = jump.concat(c.jump); next = c.next; } }); } // next is the entrypoint into the function, so we set the first instruction in the function // to next. idxs[fn.frst] = next; for(var i=0; i<pend.length; ++i) { if(pend[i].type == MACHINE_CONSTANTS.CODE_TYPE_RETURN) { // Oh, goody! We're done processing this function. // If there is a return target to jump to after this function, // then we put a goto there. Otherwise we leave the return function in. if(return_target) { exec.push(lnkr$makeGoto(return_target)); } else { exec.push(pend[i]); // We copy in the return function. } jump.push(idxs[i]); // Sanity check. // console.log(exec.length); if(exec.length != jump.length) { lnkr$except("Exec and Jump of different lengths."); } return { exec: exec, jump: jump }; } else { // We swap out the next jumps for lazily evaluated indices, // no matter what the type is. if(pend[i].hasOwnProperty("next")) { pend[i].next = idxs[pend[i].next]; } else { pend[i].next_pos = idxs[pend[i].next_pos]; pend[i].next_zero = idxs[pend[i].next_zero]; } switch (pend[i].type) { case MACHINE_CONSTANTS.CODE_TYPE_REGISTER: // We're accessing a register here. We map the accessed register // from the local to the global registers. pend[i].register = getReg(scope, pend[i].register); // Note: no break; here, we still need to add the line to the code. case MACHINE_CONSTANTS.CODE_TYPE_GOTO: // Add the current line to the code: exec.push(pend[i]); jump.push(idxs[i]); break; case MACHINE_CONSTANTS.CODE_TYPE_CALL: // We need to map both the input and output registers, // but leave the numbers unchanged. Numbers are stored as // negative values. var r_in = pend[i].in.map((v) => (v >= 0?getReg(scope, v):v)); var r_out = pend[i].out.map((v) => (v >= 0?getReg(scope, v):v)); var sub = flattenFunctionCode(pend[i].fn, idxs[i], r_in, r_out, pend[i].next); exec = exec.concat(sub.exec); jump = jump.concat(sub.jump); break; default: lnkr$except("Unexpected type for compiled code.", fn.lineno); } } } // We've finished running over all indices without // returning. This should not have happened. lnkr$except("Function without return.", fn.lineno); } var srcF = lnkr$find(allfunc, target); var rv = flattenFunctionCode(target); var exec = rv.exec; var line = rv.jump; // Now we update the lazyIndices with the actual line number: line.forEach((l, i) => l.pos = i); // And we swap each lazyIndex with the position: exec.forEach(function(e) { if(e.type != MACHINE_CONSTANTS.CODE_TYPE_RETURN) { if(e.hasOwnProperty("next")) { e.next = e.next.pos; } else { e.next_pos = e.next_pos.pos; e.next_zero = e.next_zero.pos; } } }); // ...and we're done! return { "frst": srcF.frst, "name": srcF.name + "_compiled", "args": srcF.args.map((r) => r + startingRegister[0]), "rets": srcF.rets.map((r) => r + startingRegister[0]), "deps": [], "regs": regs, "exec": exec, "opts": {"linked":true} }; } return { LinkerException: LinkerException, link: lnkr$link }; })();<|fim▁end|>
<|file_name|>OpenGLESPage.xaml.cpp<|end_file_name|><|fim▁begin|>/* * cocos2d-x http://www.cocos2d-x.org * * Copyright (c) 2010-2014 - cocos2d-x community * Copyright (c) 2017-2018 Xiamen Yaji Software Co., Ltd. * * Portions Copyright (c) Microsoft Open Technologies, Inc. * All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and limitations under the License. */ #include "App.xaml.h" #include "OpenGLESPage.xaml.h" using namespace CocosAppWinRT; using namespace cocos2d; using namespace Platform; using namespace Concurrency; using namespace Windows::Foundation; using namespace Windows::Graphics::Display; using namespace Windows::System::Threading; using namespace Windows::UI::Core; using namespace Windows::UI::Input; using namespace Windows::UI::Xaml; using namespace Windows::UI::Xaml::Controls; using namespace Windows::UI::Xaml::Controls::Primitives; using namespace Windows::UI::Xaml::Data; using namespace Windows::UI::Xaml::Input; using namespace Windows::UI::Xaml::Media; using namespace Windows::UI::Xaml::Navigation; #if (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP) || _MSC_VER >= 1900 using namespace Windows::Phone::UI::Input; #endif OpenGLESPage::OpenGLESPage() : OpenGLESPage(nullptr) { } OpenGLESPage::OpenGLESPage(OpenGLES* openGLES) : mOpenGLES(openGLES), mRenderSurface(EGL_NO_SURFACE), mCoreInput(nullptr), mDpi(0.0f), mDeviceLost(false), mCursorVisible(true), mVisible(false), mOrientation(DisplayOrientations::Landscape) { InitializeComponent(); Windows::UI::Core::CoreWindow^ window = Windows::UI::Xaml::Window::Current->CoreWindow; window->VisibilityChanged += ref new Windows::Foundation::TypedEventHandler<Windows::UI::Core::CoreWindow^, Windows::UI::Core::VisibilityChangedEventArgs^>(this, &OpenGLESPage::OnVisibilityChanged); window->KeyDown += ref new TypedEventHandler<CoreWindow^, KeyEventArgs^>(this, &OpenGLESPage::OnKeyPressed); window->KeyUp += ref new TypedEventHandler<CoreWindow^, KeyEventArgs^>(this, &OpenGLESPage::OnKeyReleased); window->CharacterReceived += ref new TypedEventHandler<CoreWindow^, CharacterReceivedEventArgs^>(this, &OpenGLESPage::OnCharacterReceived); DisplayInformation^ currentDisplayInformation = DisplayInformation::GetForCurrentView(); currentDisplayInformation->OrientationChanged += ref new TypedEventHandler<DisplayInformation^, Object^>(this, &OpenGLESPage::OnOrientationChanged); mOrientation = currentDisplayInformation->CurrentOrientation; this->Loaded += ref new Windows::UI::Xaml::RoutedEventHandler(this, &OpenGLESPage::OnPageLoaded); #if _MSC_VER >= 1900 if (Windows::Foundation::Metadata::ApiInformation::IsTypePresent("Windows.UI.ViewManagement.StatusBar")) { Windows::UI::ViewManagement::StatusBar::GetForCurrentView()->HideAsync(); } if (Windows::Foundation::Metadata::ApiInformation::IsTypePresent("Windows.Phone.UI.Input.HardwareButtons")) { HardwareButtons::BackPressed += ref new EventHandler<BackPressedEventArgs^>(this, &OpenGLESPage::OnBackButtonPressed); } #else #if (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP) Windows::UI::ViewManagement::StatusBar::GetForCurrentView()->HideAsync(); 
HardwareButtons::BackPressed += ref new EventHandler<BackPressedEventArgs^>(this, &OpenGLESPage::OnBackButtonPressed); #else // Disable all pointer visual feedback for better performance when touching. // This is not supported on Windows Phone applications. auto pointerVisualizationSettings = Windows::UI::Input::PointerVisualizationSettings::GetForCurrentView(); pointerVisualizationSettings->IsContactFeedbackEnabled = false; pointerVisualizationSettings->IsBarrelButtonFeedbackEnabled = false; #endif #endif CreateInput(); } void OpenGLESPage::CreateInput() { // Register our SwapChainPanel to get independent input pointer events auto workItemHandler = ref new WorkItemHandler([this](IAsyncAction ^) { // The CoreIndependentInputSource will raise pointer events for the specified device types on whichever thread it's created on. mCoreInput = swapChainPanel->CreateCoreIndependentInputSource( Windows::UI::Core::CoreInputDeviceTypes::Mouse | Windows::UI::Core::CoreInputDeviceTypes::Touch | Windows::UI::Core::CoreInputDeviceTypes::Pen ); // Register for pointer events, which will be raised on the background thread. mCoreInput->PointerPressed += ref new TypedEventHandler<Object^, PointerEventArgs^>(this, &OpenGLESPage::OnPointerPressed); mCoreInput->PointerMoved += ref new TypedEventHandler<Object^, PointerEventArgs^>(this, &OpenGLESPage::OnPointerMoved); mCoreInput->PointerReleased += ref new TypedEventHandler<Object^, PointerEventArgs^>(this, &OpenGLESPage::OnPointerReleased); mCoreInput->PointerWheelChanged += ref new TypedEventHandler<Object^, PointerEventArgs^>(this, &OpenGLESPage::OnPointerWheelChanged); if (GLViewImpl::sharedOpenGLView() && !GLViewImpl::sharedOpenGLView()->isCursorVisible()) { mCoreInput->PointerCursor = nullptr; } // Begin processing input messages as they're delivered. mCoreInput->Dispatcher->ProcessEvents(CoreProcessEventsOption::ProcessUntilQuit); }); // Run task on a dedicated high priority background thread. mInputLoopWorker = ThreadPool::RunAsync(workItemHandler, WorkItemPriority::High, WorkItemOptions::TimeSliced); } OpenGLESPage::~OpenGLESPage() { StopRenderLoop(); DestroyRenderSurface(); } void OpenGLESPage::OnPageLoaded(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e) { // The SwapChainPanel has been created and arranged in the page layout, so EGL can be initialized. CreateRenderSurface(); StartRenderLoop(); mVisible = true; } void OpenGLESPage::CreateRenderSurface() { if (mOpenGLES && mRenderSurface == EGL_NO_SURFACE) { // The app can configure the SwapChainPanel which may boost performance. // By default, this template uses the default configuration. mRenderSurface = mOpenGLES->CreateSurface(swapChainPanel, nullptr, nullptr); // You can configure the SwapChainPanel to render at a lower resolution and be scaled up to // the swapchain panel size. This scaling is often free on mobile hardware. // // One way to configure the SwapChainPanel is to specify precisely which resolution it should render at. // Size customRenderSurfaceSize = Size(800, 600); // mRenderSurface = mOpenGLES->CreateSurface(swapChainPanel, &customRenderSurfaceSize, nullptr); // // Another way is to tell the SwapChainPanel to render at a certain scale factor compared to its size. // e.g. 
if the SwapChainPanel is 1920x1280 then setting a factor of 0.5f will make the app render at 960x640 // float customResolutionScale = 0.5f; // mRenderSurface = mOpenGLES->CreateSurface(swapChainPanel, nullptr, &customResolutionScale); // } } void OpenGLESPage::DestroyRenderSurface() { if (mOpenGLES) { mOpenGLES->DestroySurface(mRenderSurface); } mRenderSurface = EGL_NO_SURFACE; } void OpenGLESPage::RecoverFromLostDevice() { critical_section::scoped_lock lock(mRenderSurfaceCriticalSection); DestroyRenderSurface(); mOpenGLES->Reset(); CreateRenderSurface(); std::unique_lock<std::mutex> locker(mSleepMutex); mDeviceLost = false; mSleepCondition.notify_one(); } void OpenGLESPage::TerminateApp() { { critical_section::scoped_lock lock(mRenderSurfaceCriticalSection); if (mOpenGLES) { mOpenGLES->DestroySurface(mRenderSurface); mOpenGLES->Cleanup(); }<|fim▁hole|>void OpenGLESPage::StartRenderLoop() { // If the render loop is already running then do not start another thread. if (mRenderLoopWorker != nullptr && mRenderLoopWorker->Status == Windows::Foundation::AsyncStatus::Started) { return; } DisplayInformation^ currentDisplayInformation = DisplayInformation::GetForCurrentView(); mDpi = currentDisplayInformation->LogicalDpi; auto dispatcher = Windows::UI::Xaml::Window::Current->CoreWindow->Dispatcher; // Create a task for rendering that will be run on a background thread. auto workItemHandler = ref new Windows::System::Threading::WorkItemHandler([this, dispatcher](Windows::Foundation::IAsyncAction ^ action) { mOpenGLES->MakeCurrent(mRenderSurface); GLsizei panelWidth = 0; GLsizei panelHeight = 0; mOpenGLES->GetSurfaceDimensions(mRenderSurface, &panelWidth, &panelHeight); if (mRenderer.get() == nullptr) { mRenderer = std::make_shared<Cocos2dRenderer>(panelWidth, panelHeight, mDpi, mOrientation, dispatcher, swapChainPanel); } mRenderer->Resume(); while (action->Status == Windows::Foundation::AsyncStatus::Started) { if (!mVisible) { mRenderer->Pause(); } // wait until app is visible again or thread is cancelled while (!mVisible) { std::unique_lock<std::mutex> lock(mSleepMutex); mSleepCondition.wait(lock); if (action->Status != Windows::Foundation::AsyncStatus::Started) { return; // thread was cancelled. Exit thread } if (mVisible) { mRenderer->Resume(); } else // spurious wake up { continue; } } mOpenGLES->GetSurfaceDimensions(mRenderSurface, &panelWidth, &panelHeight); mRenderer.get()->Draw(panelWidth, panelHeight, mDpi, mOrientation); // Recreate input dispatch if (GLViewImpl::sharedOpenGLView() && mCursorVisible != GLViewImpl::sharedOpenGLView()->isCursorVisible()) { CreateInput(); mCursorVisible = GLViewImpl::sharedOpenGLView()->isCursorVisible(); } if (mRenderer->AppShouldExit()) { // run on main UI thread swapChainPanel->Dispatcher->RunAsync(Windows::UI::Core::CoreDispatcherPriority::High, ref new DispatchedHandler([=]() { TerminateApp(); })); return; } EGLBoolean result = GL_FALSE; { critical_section::scoped_lock lock(mRenderSurfaceCriticalSection); result = mOpenGLES->SwapBuffers(mRenderSurface); } if (result != GL_TRUE) { // The call to eglSwapBuffers was not be successful (i.e. due to Device Lost) // If the call fails, then we must reinitialize EGL and the GL resources. mRenderer->Pause(); mDeviceLost = true; // XAML objects like the SwapChainPanel must only be manipulated on the UI thread. 
swapChainPanel->Dispatcher->RunAsync(Windows::UI::Core::CoreDispatcherPriority::High, ref new Windows::UI::Core::DispatchedHandler([=]() { RecoverFromLostDevice(); }, CallbackContext::Any)); // wait until OpenGL is reset or thread is cancelled while (mDeviceLost) { std::unique_lock<std::mutex> lock(mSleepMutex); mSleepCondition.wait(lock); if (action->Status != Windows::Foundation::AsyncStatus::Started) { return; // thread was cancelled. Exit thread } if (!mDeviceLost) { mOpenGLES->MakeCurrent(mRenderSurface); // restart cocos2d-x mRenderer->DeviceLost(); } else // spurious wake up { continue; } } } } }); // Run task on a dedicated high priority background thread. mRenderLoopWorker = Windows::System::Threading::ThreadPool::RunAsync(workItemHandler, Windows::System::Threading::WorkItemPriority::High, Windows::System::Threading::WorkItemOptions::TimeSliced); } void OpenGLESPage::StopRenderLoop() { if (mRenderLoopWorker) { mRenderLoopWorker->Cancel(); std::unique_lock<std::mutex> locker(mSleepMutex); mSleepCondition.notify_one(); mRenderLoopWorker = nullptr; } } void OpenGLESPage::OnPointerPressed(Object^ sender, PointerEventArgs^ e) { bool isMouseEvent = e->CurrentPoint->PointerDevice->PointerDeviceType == Windows::Devices::Input::PointerDeviceType::Mouse; if (mRenderer) { mRenderer->QueuePointerEvent(isMouseEvent ? PointerEventType::MousePressed : PointerEventType::PointerPressed, e); } } void OpenGLESPage::OnPointerMoved(Object^ sender, PointerEventArgs^ e) { bool isMouseEvent = e->CurrentPoint->PointerDevice->PointerDeviceType == Windows::Devices::Input::PointerDeviceType::Mouse; if (mRenderer) { mRenderer->QueuePointerEvent(isMouseEvent ? PointerEventType::MouseMoved : PointerEventType::PointerMoved, e); } } void OpenGLESPage::OnPointerReleased(Object^ sender, PointerEventArgs^ e) { bool isMouseEvent = e->CurrentPoint->PointerDevice->PointerDeviceType == Windows::Devices::Input::PointerDeviceType::Mouse; if (mRenderer) { mRenderer->QueuePointerEvent(isMouseEvent ? 
PointerEventType::MouseReleased : PointerEventType::PointerReleased, e); } } void OpenGLESPage::OnPointerWheelChanged(Object^ sender, PointerEventArgs^ e) { bool isMouseEvent = e->CurrentPoint->PointerDevice->PointerDeviceType == Windows::Devices::Input::PointerDeviceType::Mouse; if (mRenderer && isMouseEvent) { mRenderer->QueuePointerEvent(PointerEventType::MouseWheelChanged, e); } } void OpenGLESPage::OnKeyPressed(CoreWindow^ sender, KeyEventArgs^ e) { //log("OpenGLESPage::OnKeyPressed %d", e->VirtualKey); if (mRenderer) { mRenderer->QueueKeyboardEvent(WinRTKeyboardEventType::KeyPressed, e); } } void OpenGLESPage::OnCharacterReceived(CoreWindow^ sender, CharacterReceivedEventArgs^ e) { #if 0 if (!e->KeyStatus.WasKeyDown) { log("OpenGLESPage::OnCharacterReceived %d", e->KeyCode); } #endif } void OpenGLESPage::OnKeyReleased(CoreWindow^ sender, KeyEventArgs^ e) { //log("OpenGLESPage::OnKeyReleased %d", e->VirtualKey); if (mRenderer) { mRenderer->QueueKeyboardEvent(WinRTKeyboardEventType::KeyReleased, e); } } void OpenGLESPage::OnOrientationChanged(DisplayInformation^ sender, Object^ args) { mOrientation = sender->CurrentOrientation; } void OpenGLESPage::SetVisibility(bool isVisible) { if (isVisible && mRenderSurface != EGL_NO_SURFACE) { if (!mVisible) { std::unique_lock<std::mutex> locker(mSleepMutex); mVisible = true; mSleepCondition.notify_one(); } } else { mVisible = false; } } void OpenGLESPage::OnVisibilityChanged(Windows::UI::Core::CoreWindow^ sender, Windows::UI::Core::VisibilityChangedEventArgs^ args) { if (args->Visible && mRenderSurface != EGL_NO_SURFACE) { SetVisibility(true); } else { SetVisibility(false); } } #if (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP) || _MSC_VER >= 1900 /* We set args->Handled = true to prevent the app from quitting when the back button is pressed. This is because this back button event happens on the XAML UI thread and not the cocos2d-x UI thread. We need to give the game developer a chance to decide to exit the app depending on where they are in their game. They can receive the back button event by listening for the EventKeyboard::KeyCode::KEY_ESCAPE event. The default behavior is to exit the app if the EventKeyboard::KeyCode::KEY_ESCAPE event is not handled by the game. */ void OpenGLESPage::OnBackButtonPressed(Object^ sender, BackPressedEventArgs^ args) { if (mRenderer) { mRenderer->QueueBackButtonEvent(); args->Handled = true; } } #endif<|fim▁end|>
} Windows::UI::Xaml::Application::Current->Exit(); }
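// A minimal, self-contained sketch (not part of the cocos2d-x sources) of the pause/resume
// pattern used by StartRenderLoop, SetVisibility and StopRenderLoop above: the render thread
// sleeps on a condition variable while the page is hidden, and the UI thread wakes it when
// visibility returns or the loop is cancelled. Standard C++ only; all names here are
// illustrative and chosen for this example.
#include <condition_variable>
#include <mutex>

class RenderGate {
public:
    // Called from the render thread; blocks until the page is visible or the loop is cancelled.
    // Returns false if the worker should exit.
    bool WaitUntilVisible()
    {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mVisible || mCancelled; });
        return !mCancelled;
    }

    // Called from the UI thread (e.g. from a VisibilityChanged handler).
    void SetVisible(bool visible)
    {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mVisible = visible;
        }
        if (visible) {
            mCondition.notify_one();
        }
    }

    // Called when the worker is cancelled, mirroring StopRenderLoop.
    void Cancel()
    {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mCancelled = true;
        }
        mCondition.notify_one();
    }

private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mVisible = false;
    bool mCancelled = false;
};

// Typical use from the worker, at the top of each loop iteration:
//     if (!gate.WaitUntilVisible()) return; // cancelled -> exit thread
// Passing a predicate to condition_variable::wait handles spurious wakeups automatically,
// which the hand-written "spurious wake up" loops in the code above check for manually.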
<|file_name|>ImageLoadQueue.cpp<|end_file_name|><|fim▁begin|>/******************************************************************************* MPhoto - Photo viewer for multi-touch devices Copyright (C) 2010 Mihai Paslariu This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. *******************************************************************************/ #include "ImageLoadQueue.h" // synchronized against all other methods ImageLoadItem ImageLoadQueue::pop ( void ) { ImageLoadItem x; _sem.acquire(); _mutex.lock(); x = QQueue<ImageLoadItem>::front(); QQueue<ImageLoadItem>::pop_front(); _mutex.unlock(); return x; } // synchronized against all other methods ImageLoadItem ImageLoadQueue::popWithPriority ( void ) { ImageLoadItem x; _sem.acquire(); _mutex.lock(); QQueue<ImageLoadItem>::Iterator it; QQueue<ImageLoadItem>::Iterator it_max; int max_prio = -1; for ( it = this->begin(); it != this->end(); it++ ) { if ( it->priority > max_prio ) { max_prio = it->priority; it_max = it; } } x = *it_max; this->erase( it_max ); _mutex.unlock(); return x; } // synchronized only against pop void ImageLoadQueue::push ( const ImageLoadItem &x ) { _mutex.lock(); QQueue<ImageLoadItem>::push_back(x); _sem.release(); _mutex.unlock(); } // synchronized only against pop QList<ImageLoadItem> ImageLoadQueue::clear( void ) { QList<ImageLoadItem> cleared_items = QList<ImageLoadItem>(); while ( _sem.tryAcquire() ) { ImageLoadItem x; _mutex.lock(); x = QQueue<ImageLoadItem>::front(); QQueue<ImageLoadItem>::pop_front(); _mutex.unlock(); cleared_items.append( x ); } return cleared_items;<|fim▁hole|>{ _mutex.lock(); QQueue<ImageLoadItem>::Iterator it; for ( it = this->begin(); it != this->end(); it++ ) { it->priority = 0; } _mutex.unlock(); } void ImageLoadQueue::updatePriority( QImage ** dest, int priority ) { _mutex.lock(); QQueue<ImageLoadItem>::Iterator it; for ( it = this->begin(); it != this->end(); it++ ) { if ( it->destination == dest ) it->priority = priority; } _mutex.unlock(); }<|fim▁end|>
} void ImageLoadQueue::clearPriorities( void )
<|file_name|>cgc.py<|end_file_name|><|fim▁begin|># cgc.py - Curriculum Graph Converter - Dana Toribio import graphviz import os import re import subprocess import sys re_courses = re.compile('\w+\s\d+\w*') # regex for courses header = r'''from graphviz import Digraph g = Digraph('studyplan', filename='studyplan.gv') g.attr('graph', fontname='Helvetica') g.attr('node', fontname='Helvetica')''' legend = r'''c0 = Digraph('cluster_0') c0.body.append('label = "LEGEND"') c0.body.append('color=lightgrey') c0.node_attr.update(style='filled', color='white') c0.edge_attr.update(color='white') c0.node('Semester 6', color='plum') c0.node('Semester 7', color='crimson') c0.node('Semester 3', color='peachpuff') c0.node('Semester 4', color='darkseagreen') c0.node('Semester 5', color='lightblue') c0.node('Completed', color='grey') c0.node('Semester 1', color='pink') c0.node('Semester 2', color='lightsalmon') c0.node('Semester 8', color='chocolate') c0.edge('Semester 6', 'Semester 7') c0.edge('Semester 7', 'Semester 8') c0.edge('Semester 3', 'Semester 4') c0.edge('Semester 4', 'Semester 5') c0.edge('Completed', 'Semester 1') c0.edge('Semester 1', 'Semester 2') c0.body.append('label = "LEGEND"') ''' req_electives = r'''c1 = Digraph('cluster_1') c1.body.append('color=aliceblue') c1.body.append('style=filled') c1.body.append('labelloc = "b"') c1.body.append('label = "''' req_electives_footer = "c1.body.append('label = \"" trk_electives = r''' c2 = Digraph('cluster_2') c2.body.append('color=aliceblue') c2.body.append('style=filled') c2.body.append('labelloc = "b"') c2.body.append('label = "''' trk_electives_footer = "c2.body.append('label = \"" completed_courses = '' suggestions = '' core_courses = '' elec_prereqs = '' def node(value): return ".node('" + value + "')\n" def prereq_edge(node1, node2, crit): if crit is True: return ".edge('" + node1 + "', '" + node2 + "', color='red')\n" else: return ".edge('" + node1 + "', '" + node2 + "')\n" def coreq_edge(node1, node2, crit): if crit is True: return ".edge('" + node1 + "', '" + node2 + "', '', arrowhead='dot', arrowtail='dot', dir='both', color='red')\n" else: return ".edge('" + node1 + "', '" + node2 + "', '', arrowhead='dot', arrowtail='dot', dir='both')\n" f = open('studyplan.txt', 'r') nf = open('studyplan.py', 'w') write_to = '' legend_index = 0 legend_color = ['pink', 'lightsalmon', 'peachpuff', 'darkseagreen', 'lightblue', 'plum', 'crimson', 'chocolate', 'goldenrod'] for line in f: if ('#' in line.split(' ')) and ('Core' in line): write_to = 'core' core_courses = core_courses + '\n' + line elif ('#' in line.split(' ')) and ('required' in line): write_to = 'req_electives' req_electives = req_electives + line[2:-1] + '"\')\n' elif ('Track' in line): write_to = 'trk_electives' trk_electives = trk_electives + line[2:-1] + '"\')\n' elif ('#' in line.split(' ')) and ('taken' in line): write_to = 'suggestions' suggestions = suggestions + '\n' + line + "g.attr('node', style='filled', color='grey')\n" elif ('#' in line.split(' ')) and ('semester' in line): write_to = 'suggestions' suggestions = suggestions + '\n' + line + "g.attr('node', style='filled', color='" + legend_color[legend_index] + "')\n" legend_index = legend_index + 1 elif line is '\n': write_to = '' course = re_courses.findall(line) if write_to is 'core': if (course) and ('->' in line) and ('*' in line): core_courses = core_courses + 'g' + prereq_edge(course[0], course[1], True) elif (course) and ('--' in line) and ('*' in line): core_courses = core_courses + 'g' + coreq_edge(course[0], course[1], 
True) elif (course) and ('->' in line): core_courses = core_courses + 'g' + prereq_edge(course[0], course[1], False) elif (course) and ('--' in line): core_courses = core_courses + 'g' + coreq_edge(course[0], course[1], False) elif (course): core_courses = core_courses + 'g' + node(course[0]) elif write_to is 'req_electives': if (course) and ('->' in line): req_electives = req_electives + 'c1' + prereq_edge(course[0], course[1], False) elif (course) and ('--' in line): req_electives = req_electives + 'c1' + coreq_edge(course[0], course[1], False) elif (course): req_electives = req_electives + 'c1' + node(course[0]) elif write_to is 'trk_electives': if (course) and ('->' in line):<|fim▁hole|> elif (course) and ('--' in line): trk_electives = trk_electives + 'c2' + coreq_edge(course[0], course[1], False) elif (course): trk_electives = trk_electives + 'c2' + node(course[0]) elif (write_to is 'suggestions') and course: suggestions = suggestions + 'g' + node(course[0]) else: pass nf.write(header + '\n') nf.write(legend + '\n') nf.write(req_electives + '\n') nf.write(trk_electives + '\n') nf.write(suggestions + '\n') nf.write(core_courses + '\n') # write track prerequisites # subgraph calls nf.write('g.subgraph(c1)' + '\n') nf.write('g.subgraph(c2)' + '\n') nf.write('g.subgraph(c0)' + '\n') nf.write('g.view()') os.startfile('studyplan.py')<|fim▁end|>
trk_electives = trk_electives + 'c2' + prereq_edge(course[0], course[1], False)
<|file_name|>tests.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use assert_matches::assert_matches; use url::form_urlencoded::byte_serialize; use std::{num::NonZeroU64, panic, rc::Rc}; use crate::{ access::CopyAccessExt, db, validation::is_valid_identifier, views::{IndexAddress, IndexType, RawAccess, View, ViewWithMetadata}, Database, DbOptions, Fork, ListIndex, MapIndex, ResolvedAddress, RocksDB, TemporaryDB, }; const IDX_NAME: &str = "idx_name"; const PREFIXED_IDX: (&str, u64) = ("idx", 42); // Conversion to simplify `ResolvedAddress` instantiation for tests. This conversion // is not used in the main code, so it's intentionally placed here. impl From<(&str, u64)> for ResolvedAddress { fn from((name, id): (&str, u64)) -> Self { Self::new(name, NonZeroU64::new(id)) } } fn assert_iter<T: RawAccess>(view: &View<T>, from: u8, assumed: &[(u8, u8)]) { let mut iter = view.iter_bytes(&[from]); let mut values = Vec::new(); while let Some((k, v)) = iter.next() { values.push((k[0], v[0])); } assert_eq!(values, assumed); } fn assert_initial_state<T: RawAccess>(view: &View<T>) { assert_eq!(view.get_bytes(&[1]), Some(vec![1])); assert_eq!(view.get_bytes(&[2]), Some(vec![2])); assert_eq!(view.get_bytes(&[3]), Some(vec![3])); assert_eq!(view.get_bytes(&[4]), None); } fn test_changelog<T, I>(db: &T, address: I) where T: Database, I: Into<ResolvedAddress> + Copy, { let mut fork = db.fork(); { let mut view = View::new(&fork, address); view.put(&vec![1], vec![1]); view.put(&vec![2], vec![2]); view.put(&vec![3], vec![3]); assert_initial_state(&view); } fork.flush(); { let mut view = View::new(&fork, address); assert_initial_state(&view); view.put(&vec![1], vec![10]); view.put(&vec![4], vec![40]); view.remove(&vec![2]); assert_eq!(view.get_bytes(&[1]), Some(vec![10])); assert_eq!(view.get_bytes(&[2]), None); assert_eq!(view.get_bytes(&[3]), Some(vec![3])); assert_eq!(view.get_bytes(&[4]), Some(vec![40])); } fork.rollback(); { let view = View::new(&fork, address); assert_initial_state(&view); } fork.flush(); { let mut view = View::new(&fork, address); view.put(&vec![4], vec![40]); view.put(&vec![4], vec![41]); view.remove(&vec![2]); view.put(&vec![2], vec![20]); assert_eq!(view.get_bytes(&[1]), Some(vec![1])); assert_eq!(view.get_bytes(&[2]), Some(vec![20])); assert_eq!(view.get_bytes(&[3]), Some(vec![3])); assert_eq!(view.get_bytes(&[4]), Some(vec![41])); } fork.rollback(); { let view = View::new(&fork, address); assert_initial_state(&view); assert_eq!(view.get_bytes(&[4]), None); } View::new(&fork, address).put(&vec![2], vec![20]); fork.flush(); View::new(&fork, address).put(&vec![3], vec![30]); fork.rollback(); let view = View::new(&fork, address); assert_eq!(view.get_bytes(&[1]), Some(vec![1])); assert_eq!(view.get_bytes(&[2]), Some(vec![20])); assert_eq!(view.get_bytes(&[3]), Some(vec![3])); assert_eq!(view.get_bytes(&[4]), None); } fn _views_in_same_family<T: Database>(db: &T) { const IDX_1: (&str, u64) = ("foo", 23); const IDX_2: (&str, 
u64) = ("foo", 42); let mut fork = db.fork(); { let mut view1 = View::new(&fork, IDX_1); let mut view2 = View::new(&fork, IDX_2); view1.put(&vec![1], vec![10]); view1.put(&vec![2], vec![20]); assert_eq!(view1.get_bytes(&[1]), Some(vec![10])); assert_eq!(view2.get_bytes(&[1]), None); assert_iter(&view1, 1, &[(1, 10), (2, 20)]); assert_iter(&view2, 1, &[]); view2.put(&vec![1], vec![1]); view2.put(&vec![1], vec![2]); view2.put(&vec![2], vec![4]); view2.put(&vec![0], vec![0, 1, 2, 3]); assert_eq!(view1.get_bytes(&[1]), Some(vec![10])); assert_eq!(view2.get_bytes(&[1]), Some(vec![2])); assert_iter(&view1, 1, &[(1, 10), (2, 20)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); } fork.flush(); { let mut view1 = View::new(&fork, IDX_1); let view2 = View::new(&fork, IDX_2); assert_iter(&view1, 1, &[(1, 10), (2, 20)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); view1.put(&vec![2], vec![30]); view1.put(&vec![3], vec![40]); view1.put(&vec![0], vec![0]); assert_iter(&view1, 1, &[(1, 10), (2, 30), (3, 40)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); view1.remove(&vec![0]); } db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let view1 = View::new(&snapshot, IDX_1); let view2 = View::new(&snapshot, IDX_2); assert_iter(&view1, 0, &[(1, 10), (2, 30), (3, 40)]); assert_iter(&view2, 0, &[(0, 0), (1, 2), (2, 4)]); } fn test_two_mutable_borrows<T, I>(db: &T, address: I) where T: Database, I: Into<ResolvedAddress> + Copy, { let fork = db.fork(); let view1 = View::new(&fork, address); let view2 = View::new(&fork, address); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view2.get_bytes(&[0]), None); } fn test_mutable_and_immutable_borrows<T, I>(db: &T, address: I) where T: Database, I: Into<ResolvedAddress> + Copy, { let fork = db.fork(); let view1 = View::new(&fork, address); let view2 = View::new(fork.readonly(), address); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view2.get_bytes(&[0]), None); } fn test_clear_view<T, I>(db: &T, address: I) where T: Database, I: Into<ResolvedAddress> + Copy, { let fork = db.fork(); { let mut view = View::new(&fork, address); view.put(&vec![1], vec![1, 2]); view.put(&vec![2], vec![3, 4]); view.clear(); assert_eq!(view.get_bytes(&[1]), None); assert_iter(&view, 0, &[]); assert_iter(&view, 1, &[]); view.put(&vec![1], vec![5]); view.put(&vec![3], vec![6]); assert_eq!(view.get_bytes(&[1]), Some(vec![5])); assert_iter(&view, 0, &[(1, 5), (3, 6)]); assert_iter(&view, 2, &[(3, 6)]); } db.merge(fork.into_patch()).unwrap(); { let snapshot = db.snapshot(); let view = View::new(&snapshot, address); assert_eq!(view.get_bytes(&[1]), Some(vec![5])); assert_iter(&view, 0, &[(1, 5), (3, 6)]); assert_iter(&view, 2, &[(3, 6)]); } let fork = db.fork(); { let mut view = View::new(&fork, address); view.put(&vec![1], vec![3, 4]); view.clear(); view.put(&vec![4], vec![0]); view.put(&vec![3], vec![0]); assert_eq!(view.get_bytes(&[1]), None); assert_eq!(view.get_bytes(&[3]), Some(vec![0])); assert_iter(&view, 0, &[(3, 0), (4, 0)]); assert_iter(&view, 4, &[(4, 0)]); } { let view = View::new(&fork, address); assert_eq!(view.get_bytes(&[1]), None); assert_eq!(view.get_bytes(&[3]), Some(vec![0])); assert_iter(&view, 0, &[(3, 0), (4, 0)]); assert_iter(&view, 4, &[(4, 0)]); } db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let view = View::new(&snapshot, address); assert_iter(&view, 0, &[(3, 0), (4, 0)]); assert_iter(&view, 4, &[(4, 0)]); } fn test_fork_iter<T, I>(db: &T, address: I) where T: Database, I: Into<ResolvedAddress> + Copy, { let fork = db.fork(); { 
let view = View::new(&fork, address); let mut view = view; view.put(&vec![10], vec![10]); view.put(&vec![20], vec![20]); view.put(&vec![30], vec![30]); assert!(view.contains_raw_key(&[10])); } db.merge(fork.into_patch()).unwrap(); let fork = db.fork(); let mut view = View::new(&fork, address); assert!(view.contains_raw_key(&[10])); // Stored assert_iter(&view, 0, &[(10, 10), (20, 20), (30, 30)]); assert_iter(&view, 5, &[(10, 10), (20, 20), (30, 30)]); assert_iter(&view, 10, &[(10, 10), (20, 20), (30, 30)]); assert_iter(&view, 11, &[(20, 20), (30, 30)]); assert_iter(&view, 31, &[]); // Inserted view.put(&vec![5], vec![5]); assert_iter(&view, 0, &[(5, 5), (10, 10), (20, 20), (30, 30)]); view.put(&vec![25], vec![25]); assert_iter(&view, 0, &[(5, 5), (10, 10), (20, 20), (25, 25), (30, 30)]); view.put(&vec![35], vec![35]); assert_iter( &view, 0, &[(5, 5), (10, 10), (20, 20), (25, 25), (30, 30), (35, 35)], ); // Double inserted view.put(&vec![25], vec![23]); assert_iter( &view, 0, &[(5, 5), (10, 10), (20, 20), (25, 23), (30, 30), (35, 35)], ); view.put(&vec![26], vec![26]); assert_iter( &view, 0, &[ (5, 5), (10, 10), (20, 20), (25, 23), (26, 26), (30, 30), (35, 35), ],<|fim▁hole|> let mut view = View::new(&fork, address); view.put(&vec![10], vec![11]); assert_iter(&view, 0, &[(10, 11), (20, 20), (30, 30)]); view.put(&vec![30], vec![31]); assert_iter(&view, 0, &[(10, 11), (20, 20), (30, 31)]); // Deleted let fork = db.fork(); let mut view = View::new(&fork, address); view.remove(&vec![20]); assert_iter(&view, 0, &[(10, 10), (30, 30)]); view.remove(&vec![10]); assert_iter(&view, 0, &[(30, 30)]); view.put(&vec![10], vec![11]); assert_iter(&view, 0, &[(10, 11), (30, 30)]); view.remove(&vec![10]); assert_iter(&view, 0, &[(30, 30)]); // MissDeleted let fork = db.fork(); let mut view = View::new(&fork, address); view.remove(&vec![5]); assert_iter(&view, 0, &[(10, 10), (20, 20), (30, 30)]); view.remove(&vec![15]); assert_iter(&view, 0, &[(10, 10), (20, 20), (30, 30)]); view.remove(&vec![35]); assert_iter(&view, 0, &[(10, 10), (20, 20), (30, 30)]); } #[test] fn test_database_check_correct_version() { let db = TemporaryDB::default(); let snapshot = db.snapshot(); let view = View::new(&snapshot, ResolvedAddress::system(db::DB_METADATA)); let version: u8 = view.get(db::VERSION_NAME).unwrap(); assert_eq!(version, db::DB_VERSION); } #[test] #[should_panic(expected = "actual 2, expected 0")] fn test_database_check_incorrect_version() { let dir = tempfile::TempDir::new().unwrap(); let opts = DbOptions::default(); // Writes different version to metadata. { let db = RocksDB::open(&dir, &opts).unwrap(); let fork = db.fork(); { let mut view = View::new(&fork, ResolvedAddress::system(db::DB_METADATA)); view.put(db::VERSION_NAME, 2_u8); } db.merge(fork.into_patch()).unwrap(); } // Tries to open modified database. 
RocksDB::open(&dir, &opts).unwrap(); } #[test] fn fork_iter() { test_fork_iter(&TemporaryDB::new(), IDX_NAME); } #[test] fn fork_iter_prefixed() { test_fork_iter(&TemporaryDB::new(), PREFIXED_IDX); } #[test] fn changelog() { test_changelog(&TemporaryDB::new(), IDX_NAME); } #[test] fn changelog_prefixed() { test_changelog(&TemporaryDB::new(), PREFIXED_IDX); } #[test] fn multiple_views() { let db = TemporaryDB::new(); let fork = db.fork(); { // Writing to multiple views at the same time let mut view = View::new(&fork, IDX_NAME); let mut prefixed_view = View::new(&fork, PREFIXED_IDX); view.put(&vec![1], vec![10]); prefixed_view.put(&vec![1], vec![30]); view.put(&vec![2], vec![20]); view.put(&vec![3], vec![30]); prefixed_view.put(&vec![3], vec![40]); prefixed_view.put(&vec![5], vec![50]); assert_eq!(view.get_bytes(&[1]), Some(vec![10])); assert_eq!(prefixed_view.get_bytes(&[1]), Some(vec![30])); assert_iter(&view, 0, &[(1, 10), (2, 20), (3, 30)]); assert_iter(&prefixed_view, 0, &[(1, 30), (3, 40), (5, 50)]); } db.merge(fork.into_patch()).unwrap(); { // Reading from a snapshot let snapshot = db.snapshot(); let view = View::new(&snapshot, IDX_NAME); let prefixed_view = View::new(&snapshot, PREFIXED_IDX); assert_iter(&view, 0, &[(1, 10), (2, 20), (3, 30)]); assert_iter(&prefixed_view, 0, &[(1, 30), (3, 40), (5, 50)]); } let fork = db.fork(); { // Reading from one view and writing to other let view = View::new(&fork, IDX_NAME); let mut prefixed_view = View::new(&fork, PREFIXED_IDX); assert_iter(&view, 0, &[(1, 10), (2, 20), (3, 30)]); assert_iter(&prefixed_view, 0, &[(1, 30), (3, 40), (5, 50)]); prefixed_view.remove(&vec![3]); prefixed_view.put(&vec![2], vec![60]); assert_iter(&view, 0, &[(1, 10), (2, 20), (3, 30)]); assert_iter(&prefixed_view, 0, &[(1, 30), (2, 60), (5, 50)]); } } #[test] fn multiple_indexes() { let db = TemporaryDB::new(); let fork = db.fork(); { let mut list: ListIndex<_, u32> = fork.get_list(IDX_NAME); let mut map = fork.get_map(("idx", &3)); for i in 0..10 { list.push(i); if i % 2 == 0 { map.put(&i, "??".to_owned()); } } } db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let list: ListIndex<_, u32> = snapshot.get_list(IDX_NAME); let map: MapIndex<_, u32, String> = snapshot.get_map(("idx", &3)); assert_eq!(list.len(), 10); assert!(map.values().all(|val| val == "??")); let fork = db.fork(); let list: ListIndex<_, u32> = fork.get_list(IDX_NAME); let mut map = fork.get_map(("idx", &3)); for item in &list { map.put(&item, item.to_string()); } assert_eq!(map.values().count(), 10); assert!(map.iter_from(&3).all(|(k, v)| k < 10 && v == k.to_string())); } #[test] fn views_in_same_family() { const IDX_1: (&str, u64) = ("foo", 23); const IDX_2: (&str, u64) = ("foo", 42); let db = TemporaryDB::new(); let mut fork = db.fork(); { let mut view1 = View::new(&fork, IDX_1); let mut view2 = View::new(&fork, IDX_2); view1.put(&vec![1], vec![10]); view1.put(&vec![2], vec![20]); assert_eq!(view1.get_bytes(&[1]), Some(vec![10])); assert_eq!(view2.get_bytes(&[1]), None); assert_iter(&view1, 1, &[(1, 10), (2, 20)]); assert_iter(&view2, 1, &[]); view2.put(&vec![1], vec![1]); view2.put(&vec![1], vec![2]); view2.put(&vec![2], vec![4]); view2.put(&vec![0], vec![0, 1, 2, 3]); assert_eq!(view1.get_bytes(&[1]), Some(vec![10])); assert_eq!(view2.get_bytes(&[1]), Some(vec![2])); assert_iter(&view1, 1, &[(1, 10), (2, 20)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); } fork.flush(); { let mut view1 = View::new(&fork, IDX_1); let view2 = View::new(&fork, IDX_2); assert_iter(&view1, 1, &[(1, 
10), (2, 20)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); view1.put(&vec![2], vec![30]); view1.put(&vec![3], vec![40]); view1.put(&vec![0], vec![0]); assert_iter(&view1, 1, &[(1, 10), (2, 30), (3, 40)]); assert_iter(&view2, 1, &[(1, 2), (2, 4)]); view1.remove(&vec![0]); } db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let view1 = View::new(&snapshot, IDX_1); let view2 = View::new(&snapshot, IDX_2); assert_iter(&view1, 0, &[(1, 10), (2, 30), (3, 40)]); assert_iter(&view2, 0, &[(0, 0), (1, 2), (2, 4)]); } #[test] fn rollbacks_for_indexes_in_same_family() { use crate::ProofListIndex; fn indexes(fork: &Fork) -> (ProofListIndex<&Fork, i64>, ProofListIndex<&Fork, i64>) { let list1 = fork.get_proof_list(("foo", &1)); let list2 = fork.get_proof_list(("foo", &2)); (list1, list2) } let db = TemporaryDB::new(); let mut fork = db.fork(); { let (mut list1, mut list2) = indexes(&fork); list1.extend(-3..4); list2.extend(vec![2, 3, 5, 8]); assert_eq!( list1.iter().collect::<Vec<_>>(), vec![-3, -2, -1, 0, 1, 2, 3] ); assert_eq!(list2.iter().collect::<Vec<_>>(), vec![2, 3, 5, 8]); } fork.flush(); { let (mut list1, list2) = indexes(&fork); assert_eq!( list1.iter().collect::<Vec<_>>(), vec![-3, -2, -1, 0, 1, 2, 3] ); assert_eq!(list2.iter().collect::<Vec<_>>(), vec![2, 3, 5, 8]); list1.set(0, 3); list1.set(1, 2); assert_eq!(list1.iter().collect::<Vec<_>>(), vec![3, 2, -1, 0, 1, 2, 3]); assert_eq!(list2.iter_from(1).collect::<Vec<_>>(), vec![3, 5, 8]); } fork.rollback(); let (list1, list2) = indexes(&fork); assert_eq!( list1.iter().collect::<Vec<_>>(), vec![-3, -2, -1, 0, 1, 2, 3] ); assert_eq!(list2.iter().collect::<Vec<_>>(), vec![2, 3, 5, 8]); } #[test] fn clear_view() { test_clear_view(&TemporaryDB::new(), IDX_NAME); } #[test] fn clear_prefixed_view() { test_clear_view(&TemporaryDB::new(), PREFIXED_IDX); } #[test] fn clear_sibling_views() { const IDX_1: (&str, u64) = ("foo", 23); const IDX_2: (&str, u64) = ("foo", 42); fn assert_view_states<I: RawAccess + Copy>(db_view: I) { let view1 = View::new(db_view, IDX_1); let view2 = View::new(db_view, IDX_2); assert_eq!(view1.get_bytes(&[1]), None); assert_eq!(view1.get_bytes(&[0]), Some(vec![5])); assert_eq!(view2.get_bytes(&[0]), Some(vec![3])); assert_iter(&view1, 1, &[(3, 6)]); assert_iter(&view2, 1, &[(2, 4)]); } let db = TemporaryDB::new(); let fork = db.fork(); { let mut view1 = View::new(&fork, IDX_1); let mut view2 = View::new(&fork, IDX_2); view1.put(&vec![0], vec![1]); view1.put(&vec![1], vec![2]); view2.put(&vec![0], vec![3]); view2.put(&vec![2], vec![4]); view1.clear(); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view2.get_bytes(&[0]), Some(vec![3])); assert_iter(&view2, 0, &[(0, 3), (2, 4)]); view1.put(&vec![0], vec![5]); view1.put(&vec![3], vec![6]); } db.merge(fork.into_patch()).unwrap(); assert_view_states(&db.snapshot()); let fork = db.fork(); assert_view_states(&fork); let mut view1 = View::new(&fork, IDX_1); view1.remove(&vec![1]); view1.remove(&vec![2]); view1.remove(&vec![3]); view1.put(&vec![2], vec![7]); view1.put(&vec![1], vec![8]); assert_iter(&view1, 0, &[(0, 5), (1, 8), (2, 7)]); let mut view2 = View::new(&fork, IDX_2); view2.clear(); assert_iter(&view1, 0, &[(0, 5), (1, 8), (2, 7)]); } #[test] #[should_panic] fn two_mutable_borrows() { test_two_mutable_borrows(&TemporaryDB::new(), IDX_NAME); } #[test] #[should_panic] fn two_mutable_prefixed_borrows() { test_two_mutable_borrows(&TemporaryDB::new(), PREFIXED_IDX); } #[test] #[should_panic] fn mutable_and_immutable_borrows() { 
test_mutable_and_immutable_borrows(&TemporaryDB::new(), IDX_NAME); } #[test] #[should_panic] fn mutable_and_immutable_prefixed_borrows() { test_mutable_and_immutable_borrows(&TemporaryDB::new(), PREFIXED_IDX); } #[test] fn multiple_immutable_borrows_from_fork() { let db = TemporaryDB::new(); let fork = db.fork(); let view1 = View::new(fork.readonly(), IDX_NAME); let view2 = View::new(fork.readonly(), IDX_NAME); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view2.get_bytes(&[0]), None); let view1 = View::new(fork.readonly(), PREFIXED_IDX); let view2 = View::new(fork.readonly(), PREFIXED_IDX); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view2.get_bytes(&[0]), None); } #[test] fn immutable_view_from_fork_reflects_changes_in_fork() { let db = TemporaryDB::new(); let fork = db.fork(); { let mut view = View::new(&fork, IDX_NAME); view.put(&vec![1], vec![1, 2, 3, 4]); view.put(&vec![2], vec![5, 6, 7]); } { let view = View::new(fork.readonly(), IDX_NAME); assert_eq!(view.get_bytes(&[1]), Some(vec![1, 2, 3, 4])); let other_view = View::new(fork.readonly(), IDX_NAME); assert_eq!(other_view.get_bytes(&[2]), Some(vec![5, 6, 7])); } View::new(&fork, IDX_NAME).clear(); let view = View::new(fork.readonly(), IDX_NAME); assert_eq!(view.get_bytes(&[1]), None); } #[test] fn immutable_view_from_fork_reads_from_snapshot() { let db = TemporaryDB::new(); let fork = db.fork(); { let mut view = View::new(&fork, IDX_NAME); view.put(&vec![1], vec![1, 2, 3, 4]); view.put(&vec![2], vec![5, 6, 7]); } db.merge_sync(fork.into_patch()).unwrap(); let mut fork = db.fork(); View::new(&fork, IDX_NAME).put(&vec![1], vec![100]); fork.flush(); View::new(&fork, IDX_NAME).put(&vec![3], vec![200]); let fork = fork.readonly(); // Read from unflushed fork. let view = View::new(fork, IDX_NAME); assert_eq!(view.get_bytes(&[3]), Some(vec![200])); // Read from flushed fork. let view = View::new(fork, IDX_NAME); assert_eq!(view.get_bytes(&[1]), Some(vec![100])); // Read from snapshot. let other_view = View::new(fork, IDX_NAME); assert_eq!(other_view.get_bytes(&[2]), Some(vec![5, 6, 7])); } #[test] fn mutable_and_immutable_borrows_for_different_views() { let db = TemporaryDB::new(); let fork = db.fork(); let readonly = fork.readonly(); let immutable_view1 = View::new(readonly, "first"); let immutable_view2 = View::new(readonly, "second"); View::new(&fork, "third").put(&vec![1], vec![1, 2, 3]); assert_eq!(immutable_view1.get_bytes(&[1]), None); let immutable_view3 = View::new(readonly, "third"); assert_eq!(immutable_view3.get_bytes(&[1]), Some(vec![1, 2, 3])); drop(immutable_view1); View::new(&fork, "first").put(&vec![1], vec![4, 5, 6]); let immutable_view1 = View::new(readonly, "first"); assert_eq!(immutable_view1.get_bytes(&[1]), Some(vec![4, 5, 6])); assert_eq!(immutable_view2.get_bytes(&[1]), None); } #[test] fn views_based_on_rc_fork() { fn test_lifetime<T: 'static>(_: T) {} const IDX_1: (&str, u64) = ("foo", 23); const IDX_2: (&str, u64) = ("foo", 42); let db = TemporaryDB::new(); let fork = Rc::new(db.fork()); let view1 = View::new(fork.clone(), IDX_1); let view2 = View::new(fork.clone(), IDX_2); // Test that views have 'static lifetime. 
test_lifetime(view1); test_lifetime(view2); let mut view1 = View::new(fork.clone(), IDX_1); let mut view2 = View::new(fork.clone(), IDX_2); view1.put(&vec![0], vec![1]); view1.put(&vec![1], vec![2]); assert_eq!(view1.get_bytes(&[0]), Some(vec![1])); assert_eq!(view1.get_bytes(&[1]), Some(vec![2])); view2.put(&vec![0], vec![3]); view1.put(&vec![0], vec![3]); drop(view1); view2.put(&vec![2], vec![4]); drop(view2); { // Check that changes introduced by the both views are reflected in the fork. let mut view1 = View::new(&*fork, IDX_1); assert_eq!(view1.get_bytes(&[0]), Some(vec![3])); view1.remove(&vec![0]); let view2 = View::new(fork.clone(), IDX_2); assert_eq!(view2.get_bytes(&[2]), Some(vec![4])); } // ...and that these changes propagate to patch. let patch = Rc::try_unwrap(fork).unwrap().into_patch(); db.merge_sync(patch).unwrap(); let snapshot = db.snapshot(); let view1 = View::new(&snapshot, IDX_1); assert_eq!(view1.get_bytes(&[0]), None); assert_eq!(view1.get_bytes(&[1]), Some(vec![2])); let view2 = View::new(&snapshot, IDX_2); assert_eq!(view2.get_bytes(&[0]), Some(vec![3])); assert_eq!(view2.get_bytes(&[1]), None); assert_eq!(view2.get_bytes(&[2]), Some(vec![4])); } fn test_metadata(addr: impl Into<IndexAddress>) { let addr = addr.into(); let db = TemporaryDB::new(); // Creates the index metadata. let fork = db.fork(); ViewWithMetadata::get_or_create(&fork, &addr, IndexType::ProofMap) .map_err(drop) .unwrap(); assert!( ViewWithMetadata::get_or_create(&db.snapshot(), &addr, IndexType::ProofMap) .unwrap() .is_phantom() ); db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let view = ViewWithMetadata::get_or_create(&snapshot, &addr, IndexType::ProofMap).unwrap(); assert_eq!(view.index_type(), IndexType::ProofMap); assert!(!view.is_phantom()); let fork = db.fork(); ViewWithMetadata::get_or_create(&fork, &addr, IndexType::ProofMap) .map_err(drop) .unwrap(); } #[test] fn test_metadata_simple() { test_metadata("simple"); } #[test] fn test_metadata_index_family() { test_metadata(("family", "family_id")); } fn get_address<T: RawAccess>(view: &View<T>) -> &ResolvedAddress { match view { View::Real(inner) => &inner.address, View::Phantom => panic!("Getting address for a phantom view"), } } #[test] fn test_metadata_index_identifiers() { let db = TemporaryDB::new(); let fork = db.fork(); // Creates the first index metadata. let view: View<_> = ViewWithMetadata::get_or_create(&fork, &("simple", "family_id").into(), IndexType::Map) .map_err(drop) .unwrap() .into(); let address = get_address(&view); assert_eq!(address.name, "simple"); let id = address.id.unwrap().get(); drop(view); // Prevent "multiple mutable borrows" error later // Creates the second index metadata. let view: View<_> = ViewWithMetadata::get_or_create(&fork, &("second", "family_id").into(), IndexType::Map) .map_err(drop) .unwrap() .into(); let second_address = get_address(&view); assert_eq!(second_address.name, "second"); assert_eq!(second_address.id.unwrap().get(), id + 1); // Recreates the first index instance. 
let view: View<_> = ViewWithMetadata::get_or_create(&fork, &("simple", "family_id").into(), IndexType::Map) .map_err(drop) .unwrap() .into(); let recreated_address = get_address(&view); assert_eq!(recreated_address.name, "simple"); assert_eq!(recreated_address.id.unwrap().get(), id); } #[test] fn test_metadata_in_migrated_indexes() { let db = TemporaryDB::new(); let fork = db.fork(); let view: View<_> = ViewWithMetadata::get_or_create(&fork, &"simple".into(), IndexType::Map) .map_err(drop) .unwrap() .into(); let address = get_address(&view); assert_eq!(address.name, "simple"); let old_id = address.id.unwrap().get(); let mut addr = IndexAddress::from_root("simple"); addr.set_in_migration(); let view: View<_> = ViewWithMetadata::get_or_create(&fork, &addr, IndexType::List) .map_err(drop) .unwrap() .into(); let migrated_address = get_address(&view); assert_eq!(migrated_address.name, "simple"); let new_id = migrated_address.id.unwrap().get(); assert_ne!(old_id, new_id); } #[test] fn test_metadata_incorrect_index_type() { let db = TemporaryDB::new(); let fork = db.fork(); ViewWithMetadata::get_or_create(&fork, &"simple".into(), IndexType::Map) .map_err(drop) .unwrap(); ViewWithMetadata::get_or_create(&fork, &"simple".into(), IndexType::List) .map(drop) .unwrap_err(); } #[test] fn test_metadata_index_wrong_type() { use crate::{ access::{AccessError, AccessErrorKind, FromAccess}, ListIndex, }; let db = TemporaryDB::new(); let fork = db.fork(); { let mut map = fork.get_map("simple"); map.put(&1, vec![1, 2, 3]); } db.merge(fork.into_patch()).unwrap(); // Attempt to create an index with the wrong type (`List` instead of `Map`). let snapshot = db.snapshot(); let err = ListIndex::<_, Vec<u8>>::from_access(&snapshot, "simple".into()).unwrap_err(); assert_matches!( err, AccessError { ref addr, kind: AccessErrorKind::WrongIndexType { .. } } if *addr == IndexAddress::from("simple") ); } #[test] fn test_valid_tombstone() { use crate::{ access::{AccessErrorKind, FromAccess}, migration::Migration, ListIndex, }; let db = TemporaryDB::new(); let fork = db.fork(); let migration = Migration::new("foo", &fork); // Valid tombstone in a fork. migration.create_tombstone("bar"); // Check that index cannot be reinterpreted with another type. let err = ListIndex::<_, u64>::from_access(migration, "bar".into()).unwrap_err(); assert_matches!( err.kind, AccessErrorKind::WrongIndexType { actual, .. } if actual == IndexType::Tombstone ); // ...even after the fork is merged. db.merge(fork.into_patch()).unwrap(); let snapshot = db.snapshot(); let migration = Migration::new("foo", &snapshot); let err = ListIndex::<_, u64>::from_access(migration, "bar".into()).unwrap_err(); assert_matches!( err.kind, AccessErrorKind::WrongIndexType { actual, .. } if actual == IndexType::Tombstone ); } #[test] fn test_invalid_tombstone() { use crate::access::{Access, AccessErrorKind}; let db = TemporaryDB::new(); let fork = db.fork(); // A tombstone cannot be created outside the migration! 
let err = fork .get_or_create_view("foo.bar".into(), IndexType::Tombstone) .unwrap_err(); assert_matches!(err.kind, AccessErrorKind::InvalidTombstone); } #[test] fn valid_index_name() { assert!(check_valid_name("index_name")); assert!(check_valid_name("_index_name")); assert!(check_valid_name("AinDex_name_")); assert!(check_valid_name("core.index_name1Z")); assert!(check_valid_name("configuration.indeX_1namE")); assert!(check_valid_name("1index_Name")); assert!(check_valid_name("index-name")); assert!(check_valid_name("_index-name")); assert!(check_valid_name("indeX_1namE-")); assert!(!check_valid_name( "\u{438}\u{43d}\u{434}\u{435}\u{43a}\u{441}_name_" )); assert!(!check_valid_name("core.index_\u{438}\u{43c}\u{44f}3")); assert!(!check_valid_name("1in!dex_Name")); assert!(!check_valid_name("space name")); assert!(!check_valid_name(" space ")); } #[test] fn valid_name_for_url() { assert_valid_name_url("service_name"); assert_valid_name_url("service_name\\"); assert_valid_name_url("service name"); assert_valid_name_url("/service_name"); assert_valid_name_url("1Service_name"); assert_valid_name_url("core.service_name"); } #[test] #[should_panic(expected = "Invalid characters used in name")] fn invalid_name_panic() { let db = TemporaryDB::new(); let fork = db.fork(); let _res: ListIndex<_, u8> = fork.get_list("ind\u{435}x-name"); } fn assert_valid_name_url(name: &str) { let urlencoded: String = byte_serialize(name.as_bytes()).collect(); assert_eq!(is_valid_identifier(name), name == urlencoded); } fn check_valid_name(name: &str) -> bool { let db = TemporaryDB::new(); let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| { let fork = db.fork(); let _res: ListIndex<_, u8> = fork.get_list(name.as_ref()); })); catch_result.is_ok() } #[test] fn fork_from_patch() { let db = TemporaryDB::new(); let fork = db.fork(); { let mut index = fork.get_list("index"); index.push(1); index.push(2); index.push(3); let last = index.pop(); assert_eq!(last, Some(3)); index.set(1, 5); } let patch = fork.into_patch(); let fork: Fork = patch.into(); { let index = fork.get_list("index"); assert_eq!(index.get(0), Some(1)); assert_eq!(index.get(1), Some(5)); assert_eq!(index.get(2), None); let items: Vec<i32> = index.iter().collect(); assert_eq!(items.len(), 2); assert_eq!(items, vec![1, 5]); } db.merge(fork.into_patch()) .expect("Fork created from patch should be merged successfully"); }<|fim▁end|>
); // Replaced let fork = db.fork();
<|file_name|>templates.js<|end_file_name|><|fim▁begin|>MetaclicUtils.Templates = {}; MetaclicUtils.Templates.datasets = [ ' {{#ifCond sort "!=" false}}', ' <div class="result-sort"><label>Trier par</label>', ' <select name="sort" class="form-control">', ' {{#each sortTypes}}', <|fim▁hole|> ' <option value="{{id}}">{{name}}</option>', ' {{/ifCond}}', ' {{/each}}', ' </select>', ' <a href="#" class="sortdirection">', ' {{#ifCond sortDesc "==" true}}', ' <i class="fa fa-sort-alpha-desc"></i>', ' {{else}}', ' <i class="fa fa-sort-alpha-asc"></i>', ' {{/ifCond}}', ' </a>', '</div>', '{{/ifCond}}', '<div class="result-count">{{ total }} résultat(s)</div>', '<div class="metaclic-row">', '{{#ifCond facets "!=" undefined}}', '<div class="Metaclic-results">', '{{else}}', '<div class="Metaclic-results Metaclic-results-full">', '{{/ifCond}}', ' <ul class="search-results">', ' {{#each data}}', ' <li class="search-result dataset-result" data-dataset="{{id}}">', ' <a href="{{ page }}" title="{{ organization.name }}" data-dataset="{{id}}">', '', ' <div class="result-logo">', ' <img alt="" src="{{organization.logo}}" >', ' </div>', ' {{#if organization.public_service }}', ' <img alt="certified"', ' class="certified" rel="popover"', ' data-title="{{_ \'certified_public_service\'}}"', ' data-content="{{_ \'the_identity_of_this_public_service_public_is_certified_by_etalab\'}}"', ' data-container="body" data-trigger="hover"/>', ' {{/if}}', ' <div class="result-body">', ' <h4 class="result-title">{{title}}</h4>', '', ' <div class="result-description">', ' {{mdshort description 128}}</div></div>', '', ' </a><ul class="result-infos">', '', ' {{#if temporal_coverage }}', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'temporal_coverage\'}}">', ' <span class="fa fa-calendar fa-fw"></span>', ' {{dt temporal_coverage.start format=\'L\' }} {{_ \'to\'}} {{dt temporal_coverage.end format=\'L\' }}', ' </span>', ' </li>', ' {{/if}}', '', ' {{#if frequency }}', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Update frequency\' }}">', ' <span class="fa fa-clock-o fa-fw"></span>', ' {{_ frequency }}', ' </span>', ' </li>', ' {{/if}}', '', ' {{#if spatial.territories }}', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Spatial coverage\'}}">', ' <span class="fa fa-map-marker fa-fw"></span>', ' {{_ spatial.territories.0.name }}', ' </span>', ' </li>', ' {{/if}}', '', ' {{#if spatial.granularity }}', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Spatial granularity\'}}">', ' <span class="fa fa-bullseye fa-fw"></span>', ' {{_ spatial.granularity }}', ' </span>', ' </li>', ' {{/if}}', '', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Reuses\'}}">', ' <span class="fa fa-retweet fa-fw"></span>', ' {{default metrics.reuses 0 }}', ' </span>', ' </li>', '', ' <li>', ' <span rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Followers\'}}">', ' <span class="fa fa-star fa-fw"></span>', ' {{default metrics.followers 0 }}', ' </span>', ' </li>', '', ' </ul>', ' </li>', ' {{/each}}', ' </ul>', '</div>', '{{#ifCond facets "!=" undefined}}', '<div class="Metaclic-facets">', '{{#ifCond facets.organization "!=" undefined}}', '{{#if facets.organization}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-tags fa-fw"></i> Organisme</div>', ' <ul 
data-limitlist=5>', ' {{#each facets.organization}}', ' <a href="#" data-addID="{{this.[0]._id.$oid}}">', ' <span>{{this.[1]}}</span>', ' {{this.[0].name}}', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', '{{#ifCond facets.tag "!=" undefined}}', '{{#if facets.tag}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-tags fa-fw"></i> Tags</div>', ' <ul data-limitlist=5>', ' {{#each facets.tag}}', ' <a href="#" data-addTag="{{this.[0]}}">', ' <span>{{this.[1]}}</span>', ' {{this.[0]}}', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', '{{#ifCond facets.license "!=" undefined}}', '{{#if facets.license}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-copyright fa-fw"></i> Licences</div>', ' <ul data-limitlist=5>', ' {{#each facets.license}}', ' <a href="#" data-addLicense="{{this.[0]._id}}">', ' <span>{{this.[1]}}</span>', ' {{this.[0].title}}', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', //couverture temporelle '{{#ifCond facets.geozone "!=" undefined}}', '{{#if facets.geozone}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-map-marker fa-fw"></i> Couverture spatiale</div>', ' <ul data-limitlist=5>', ' {{#each facets.geozone}}', ' <a class="geozone-to-load" href="#" data-addGeozone="{{this.[0]._id}}">', ' <span>{{this.[1]}}</span>', ' {{this.[0]._id}} ({{this.[0].code}})', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', '{{#ifCond facets.granularity "!=" undefined}}', '{{#if facets.granularity}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-bullseye fa-fw"></i> Granularité territoriale</div>', ' <ul data-limitlist=5>', ' {{#each facets.granularity}}', ' <a href="#" data-addGranularity="{{this.[0]}}">', ' <span>{{this.[1]}}</span>', ' {{_ this.[0]}}', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', '{{#ifCond facets.format "!=" undefined}}', '{{#if facets.format}}', '<div class="facet-panel">', ' <div class="facet-panel-heading"><i class="fa fa-file fa-fw"></i> Formats</div>', ' <ul data-limitlist=5>', ' {{#each facets.format}}', ' <a href="#" data-addFormat="{{this.[0]}}">', ' <span>{{this.[1]}}</span>', ' {{_ this.[0]}}', ' </a>', ' {{/each}}', ' </ul>', '</div>', '{{/if}}', '{{/ifCond}}', // reuse '</div>', '{{/ifCond}}', '</div>', ' <div class="metaclic-pagination">', ' {{{ paginate page total page_size }}}', ' </div>', ]; MetaclicUtils.Templates.dataset = [ '<div class="dataset" data-dataset="{{id}}">', '', ' <div class=\'dataset-info\'>', ' <blockquote>{{md description }}</blockquote>', ' {{#if extras.remote_url}}', ' <a class="site_link" href="{{extras.remote_url}}" target=_blank>', ' Voir le site original', ' </a>', ' {{/if}}', ' <p class="published_on">', ' {{_ \'published_on\' }} {{dt created_at}}', ' {{_ \'and_modified_on\'}} {{dt last_modified}}', ' {{_ \'by\'}} <a title="{{organization.name}}" href="{{organization.page}}">{{organization.name}}</a>', ' </p>', ' </div>', '', ' <div class="resources-list">', ' <h3>{{_ \'Resources\'}}</h3>', ' {{#each resources}}', ' <div data-checkurl="/api/1/datasets/checkurl/" itemtype="http://schema.org/DataDownload" itemscope="itemscope" id="resource-{{id}}">', '', ' <a href="{{url}}" data-size="{{filesize}}" data-format="{{uppercase format}}" data-map_title="{{../title}}" data-title="{{title}}" data-id="{{id}}" itemprop="url" target=_blank>', ' <h4>', ' <span data-format="{{uppercase format}}">', ' 
{{uppercase format}}', ' </span>', ' {{title}}', ' <p>', ' Dernière modification le {{dt last_modified}}', ' </p>', ' </h4>', ' </a>', '', ' </div>', ' {{/each}}', ' </div>', '', ' <div class="meta">', '', ' <div class="producer">', ' <h3>{{_ \'Producer\'}}</h3>', ' <a title="{{organization.name}}" href="{{organization.page}}">', ' <img class="organization-logo producer" alt="{{organization.name}}" src="{{fulllogo organization.logo}}"><br>', ' <span class="name">', ' {{organization.name}}', ' </span>', ' </a>', ' </div>', '', '', ' <div class="info">', ' <h3>{{_ \'Informations\'}}</h3>', ' <ul>', ' <li title="{{_ \'License\'}}" rel="tooltip">', ' <i class="fa fa-copyright"></i>', ' <!--a href="http://opendatacommons.org/licenses/odbl/summary/"-->', ' {{_ license}}', ' <!--/a-->', ' </li>', ' <li title="{{_ \'Frequency\'}}" rel="tooltip">', ' <span class="fa fa-clock-o"></span>', ' {{_ frequency}}', ' </li>', ' <li title="{{_ \'Spatial granularity\'}}" rel="tooltip">', ' <span class="fa fa-bullseye"></span>', ' {{_ spatial.granularity}}', ' </li>', ' </ul>', ' <ul class="spatial_zones">', ' {{#each spatial.zones}}', ' <li data-zone="{{.}}">{{.}}</li>', ' {{/each}}', ' </ul>', ' <ul class="tags">', ' {{#each tags}}', ' <li><a title="{{.}}" href="https://www.data.gouv.fr/fr/search/?tag={{.}}">', ' {{.}}', ' </a>', ' </li>', ' {{/each}}', ' </ul>', ' <div class="Metaclic-clear">', ' </div>', ' </div>', ' </div>', '', '', ' </div>' ]; MetaclicUtils.Templates.organizationAdd = [ '{{#if generator}}', '<div class="organization_add">', '<h1>Générateur de code metaClic</h1></br>', '<i>Ajoutez des organismes en saisissant leur nom et en les sélectionnant dans la liste</i></br>', ' <input type="text" name="research" list="metaclic-autocomplete-list" class="form-control" placeholder="Organisation">', ' <datalist id="metaclic-autocomplete-list">', ' </datalist>', '</div>', ' <ul class="tags">', ' {{#if orgs}}', ' {{#each orgs}}', ' {{#if name}}', ' <li><a title="Fermer" href="#" class="facet-remove facet-organization" data-removeorganizationtoorigin="organization" data-id="{{id}}"> {{name}} ×</a></li>', ' {{/if}}', ' {{/each}}', ' {{/if}}', ' </ul>', ' <div class="Metaclic-clear"></div>', '{{/if}}', ]; MetaclicUtils.Templates.datasetsForm = [ '<div class="datasetsForm">', ' <form action="" method="get">', ' <input type="hidden" name="option" value="com_metaclic"></input>', ' <input type="hidden" name="view" value="metaclic"></input>', ' <div><label>&nbsp;</label><input type="text" name="q" value="{{q}}" placeholder="Rechercher des données" class="form-control"></input></div>', ' {{#ifCount orgs ">" 1 }}', ' <div>', ' {{else}}', ' <div class="hidden">', ' {{/ifCount}}', ' </div>', ' </form>', ' <div class="selected_facets">', '<ul class="tags">', ' {{#if organization}}', ' {{#if organization_name}}', ' {{#ifNotall organization "|"}}', ' <li><a title="{{_ \'fermer\'}}" href="#" class="facet-remove facet-organization" data-removeOrganization="organization"> {{organization_name}} &times;</a></li>', ' {{/ifNotall}}', ' {{/if}}', ' {{/if}}', ' {{#if tags}}', ' {{#each tags}}', ' <li><a title="{{_ \'fermer\'}}" href="#" class="facet-remove facet-tag" data-removeTag="{{.}}"><i class="fa fa-tags fa-fw"></i> {{.}} &times;</a></li>', ' {{/each}}', ' {{/if}}', ' {{#if license}}', ' <li><a title="{{_ \'fermer\'}}" href="#" class="facet-remove facet-license" data-removeParam="license"><i class="fa fa-copyright fa-fw"></i> {{license}} &times;</a></li>', ' {{/if}}', ' {{#if geozone}}', ' <li><a title="{{_ \'fermer\'}}" 
href="#" class="facet-remove facet-geozone" data-removeParam="geozone"><i class="fa fa-map-marker fa-fw"></i> {{geozone}} &times;</a></li>', ' {{/if}}', ' {{#if granularity}}', ' <li><a title="{{_ \'fermer\'}}" href="#" class="facet-remove facet-granularity" data-removeParam="granularity"><i class="fa fa-bullseye fa-fw"></i> {{granularity}} &times;</a></li>', ' {{/if}}', ' {{#if format}}', ' <li><a title="{{_ \'fermer\'}}" href="#" class="facet-remove facet-format" data-removeParam="format"><i class="fa fa-file fa-fw"></i> {{format}} &times;</a></li>', ' {{/if}}', ' </div>', ' </ul>', '</div>', ' <br>' ]; MetaclicUtils.Templates.lastdatasets = [ '<div class="Metaclic-lastdatasets">', ' {{#each data}}', ' <div class="card dataset-card">', ' <a class="card-logo" href="{{ organization.uri }}" target="datagouv">', ' <img alt="{{ organization.name }}" src="{{ organization.logo }}" width="70" height="70">', ' </a>', ' <div class="card-body">', ' <h4>', ' <a href="{{ url }}" title="{{title}}">', ' {{title}}', ' </a>', ' </h4>', ' </div>', ' <footer>', ' <ul>', ' <li>', ' <a rel="tooltip" data-placement="top" data-container="body" title="" data-original-title="Réutilisations">', ' <span class="fa fa-retweet fa-fw"></span>', ' {{default metrics.reuses 0 }}', ' </a>', ' </li>', ' <li>', ' <a rel="tooltip" data-placement="top" data-container="body" title="" data-original-title="Favoris">', ' <span class="fa fa-star fa-fw"></span>', ' {{default metrics.followers 0 }}', ' </a>', ' </li>', ' </ul>', ' </footer>', ' <a href="{{ url }}" title="{{title}}">', ' {{trimString description}}', ' </a>', ' <footer>', ' <ul>', ' {{#if temporal_coverage }}', ' <li>', ' <a rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Temporal coverage\' }}">', ' <span class="fa fa-calendar fa-fw"></span>', ' {{dt temporal_coverage.start format=\'L\' }} {{_ \'to\'}} {{dt temporal_coverage.end format=\'L\' }}', ' </a>', ' </li>', ' {{/if}}', '', ' {{#if spatial.granularity }}', ' <li>', ' <a rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Territorial coverage granularity\' }}">', ' <span class="fa fa-bullseye fa-fw"></span>', ' {{_ spatial.granularity }}', ' </a>', ' </li>', ' {{/if}}', '', ' {{#if frequency }}', ' <li>', ' <a rel="tooltip"', ' data-placement="top" data-container="body"', ' title="{{_ \'Frequency\' }}">', ' <span class="fa fa-clock-o fa-fw"></span>', ' {{_ frequency }}', ' </a>', ' </li>', ' {{/if}}', ' </ul>', ' </footer>', ' </div>', ' {{/each}}', ' </div>' ]; MetaclicUtils.Templates.shareCode = [ '{{#if generator}}', '{{#if organizationList}}', '<div class="Metaclic-shareCode">', '<div>', ' Code à intégrer dans votre site Internet :', ' <pre>', '&lt;script&gt;window.jQuery || document.write("&lt;script src=\'https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.0/jquery.min.js\'&gt;&lt;\\\/script&gt;")&lt;/script&gt;', '', '&lt;!-- chargement feuille de style font-awesome --&gt;', '&lt;link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.5.0/css/font-awesome.min.css"&gt;', '', '&lt;script src="https://unpkg.com/metaclic/dist/metaclic.js"&gt;&lt;/script&gt;', '&lt;div class="Metaclic-data"', ' data-q="{{q}}"', ' data-organizations="{{organizationList}}"', ' data-background_layers="{{background_layers}}"', ' data-facets="all"', ' data-page_size="{{page_size}}"', '&gt&lt;/div&gt', ' </pre>', " <p>Plus de paramétrage disponible dans la documentation: <a href='https://github.com/datakode/metaclic/wiki/Personnalisation' 
target='_blank'>https://github.com/datakode/metaclic/wiki/Personnalisation</a></p>", " <p><h1>Prévisualisation : </h1></p>", '</div>', '</div>', '{{/if}}', '{{/if}}', ]; MetaclicUtils.Templates.shareLinkMap = [ '<div class="MetaclicMap-shareLink">', '<div class="linkDiv"><a href="#">intégrez cette carte à votre site&nbsp;<i class="fa fa-share-alt"></i></a></div>', '<div class="hidden">', ' <h4>Vous pouvez intégrer cet carte sur votre site</h4>', ' <p>Pour ceci collez le code suivant dans le code HTML de votre page</p>', ' <pre>', '&lt;script&gt;window.jQuery || document.write("&lt;script src=\'https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.0/jquery.min.js\'&gt;&lt;\\\/script&gt;")&lt;/script&gt;', '', '&lt;script src="{{baseUrl}}metaclic.js"&gt;&lt;/script&gt;', '&lt;div class="Metaclic-map"', " data-resources='{{jsonencode resources}}'", //" data-leaflet_map_options='{{jsonencode leaflet_map_options}}'", " data-title='{{title}}'", '&gt&lt;/div&gt', ' </pre>', " <p>vous pouvez trouver plus d'info sur cet outil et son paramétrage à cette adresse: <a href='https://github.com/datakode/metaclic' target='_blank'>https://github.com/datakode/metaclic</a></p>", '</div>', '</div>', ]; MetaclicUtils.Templates.li_resource = [ '<li data-id="{{id}}">', '<a href="{{metadata_url}}">{{title}}</a>', '<i class="fa fa-copyright"></i> {{_ license}}', '<p class="organization" data-id="{{organization.id}}" data-slug="{{organization.slug}}">', '<img alt="{{ organization.name }}" src="{{ organization.logo }}">', '<span>{{organization.name}}</span>', '</p>', '</li>' ];<|fim▁end|>
' {{#ifCond id "==" ../sort}}', ' <option value="{{id}}" selected>{{name}}</option>', ' {{else}}',
<|file_name|>constexpr-builtin-bit-cast.cpp<|end_file_name|><|fim▁begin|>// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s // RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s -fno-signed-char // RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple aarch64_be-linux-gnu %s #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define LITTLE_END 1 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define LITTLE_END 0 #else # error "huh?" #endif template <class T, class V> struct is_same { static constexpr bool value = false; }; template <class T> struct is_same<T, T> { static constexpr bool value = true; }; static_assert(sizeof(int) == 4); static_assert(sizeof(long long) == 8); template <class To, class From> constexpr To bit_cast(const From &from) { static_assert(sizeof(To) == sizeof(From)); // expected-note@+9 {{cannot be represented in type 'bool'}} #ifdef __x86_64 // expected-note@+7 {{or 'std::byte'; '__int128' is invalid}} #endif #ifdef __CHAR_UNSIGNED__ // expected-note@+4 2 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'signed char' is invalid}} #else // expected-note@+2 2 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'signed char' is invalid}} #endif return __builtin_bit_cast(To, from); } template <class Intermediate, class Init> constexpr bool round_trip(const Init &init) { return bit_cast<Init>(bit_cast<Intermediate>(init)) == init; } void test_int() { static_assert(round_trip<unsigned>((int)-1)); static_assert(round_trip<unsigned>((int)0x12345678)); static_assert(round_trip<unsigned>((int)0x87654321)); static_assert(round_trip<unsigned>((int)0x0C05FEFE)); } void test_array() { constexpr unsigned char input[] = {0xCA, 0xFE, 0xBA, 0xBE}; constexpr unsigned expected = LITTLE_END ? 0xBEBAFECA : 0xCAFEBABE; static_assert(bit_cast<unsigned>(input) == expected); } void test_record() { struct int_splicer { unsigned x; unsigned y; constexpr bool operator==(const int_splicer &other) const { return other.x == x && other.y == y; } }; constexpr int_splicer splice{0x0C05FEFE, 0xCAFEBABE}; static_assert(bit_cast<unsigned long long>(splice) == (LITTLE_END ? 0xCAFEBABE0C05FEFE : 0x0C05FEFECAFEBABE)); static_assert(bit_cast<int_splicer>(0xCAFEBABE0C05FEFE).x == (LITTLE_END ? 
0x0C05FEFE : 0xCAFEBABE)); static_assert(round_trip<unsigned long long>(splice)); static_assert(round_trip<long long>(splice)); struct base2 { }; struct base3 { unsigned z; }; struct bases : int_splicer, base2, base3 {<|fim▁hole|> unsigned x, y, z, doublez; constexpr bool operator==(tuple4 const &other) const { return x == other.x && y == other.y && z == other.z && doublez == other.doublez; } }; constexpr bases b = {{1, 2}, {}, {3}, 4}; constexpr tuple4 t4 = bit_cast<tuple4>(b); static_assert(t4 == tuple4{1, 2, 3, 4}); static_assert(round_trip<tuple4>(b)); } void test_partially_initialized() { struct pad { signed char x; int y; }; struct no_pad { signed char x; signed char p1, p2, p3; int y; }; static_assert(sizeof(pad) == sizeof(no_pad)); constexpr pad pir{4, 4}; // expected-error@+2 {{constexpr variable 'piw' must be initialized by a constant expression}} // expected-note@+1 {{in call to 'bit_cast(pir)'}} constexpr int piw = bit_cast<no_pad>(pir).x; // expected-error@+2 {{constexpr variable 'bad' must be initialized by a constant expression}} // expected-note@+1 {{in call to 'bit_cast(pir)'}} constexpr no_pad bad = bit_cast<no_pad>(pir); constexpr pad fine = bit_cast<pad>(no_pad{1, 2, 3, 4, 5}); static_assert(fine.x == 1 && fine.y == 5); } void no_bitfields() { // FIXME! struct S { unsigned char x : 8; }; struct G { unsigned char x : 8; }; constexpr S s{0}; // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}} // expected-note@+1 {{constexpr bit_cast involving bit-field is not yet supported}} constexpr G g = __builtin_bit_cast(G, s); } void array_members() { struct S { int ar[3]; constexpr bool operator==(const S &rhs) { return ar[0] == rhs.ar[0] && ar[1] == rhs.ar[1] && ar[2] == rhs.ar[2]; } }; struct G { int a, b, c; constexpr bool operator==(const G &rhs) { return a == rhs.a && b == rhs.b && c == rhs.c; } }; constexpr S s{{1, 2, 3}}; constexpr G g = bit_cast<G>(s); static_assert(g.a == 1 && g.b == 2 && g.c == 3); static_assert(round_trip<G>(s)); static_assert(round_trip<S>(g)); } void bad_types() { union X { int x; }; struct G { int g; }; // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a union type is not allowed in a constant expression}} constexpr G g = __builtin_bit_cast(G, X{0}); // expected-error@+2 {{constexpr variable 'x' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast to a union type is not allowed in a constant expression}} constexpr X x = __builtin_bit_cast(X, G{0}); struct has_pointer { // expected-note@+1 2 {{invalid type 'int *' is a member of 'has_pointer'}} int *ptr; }; // expected-error@+2 {{constexpr variable 'ptr' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a pointer type is not allowed in a constant expression}} constexpr unsigned long ptr = __builtin_bit_cast(unsigned long, has_pointer{0}); // expected-error@+2 {{constexpr variable 'hptr' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast to a pointer type is not allowed in a constant expression}} constexpr has_pointer hptr = __builtin_bit_cast(has_pointer, 0ul); } void backtrace() { struct A { // expected-note@+1 {{invalid type 'int *' is a member of 'A'}} int *ptr; }; struct B { // expected-note@+1 {{invalid type 'A [10]' is a member of 'B'}} A as[10]; }; // expected-note@+1 {{invalid type 'B' is a base of 'C'}} struct C : B { }; struct E { unsigned long ar[10]; }; // expected-error@+2 {{constexpr 
variable 'e' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a pointer type is not allowed in a constant expression}} constexpr E e = __builtin_bit_cast(E, C{}); } void test_array_fill() { constexpr unsigned char a[4] = {1, 2}; constexpr unsigned int i = bit_cast<unsigned int>(a); static_assert(i == (LITTLE_END ? 0x00000201 : 0x01020000)); } typedef decltype(nullptr) nullptr_t; #ifdef __CHAR_UNSIGNED__ // expected-note@+5 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'unsigned long' is invalid}} #else // expected-note@+3 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'unsigned long' is invalid}} #endif // expected-error@+1 {{constexpr variable 'test_from_nullptr' must be initialized by a constant expression}} constexpr unsigned long test_from_nullptr = __builtin_bit_cast(unsigned long, nullptr); constexpr int test_from_nullptr_pass = (__builtin_bit_cast(unsigned char[8], nullptr), 0); constexpr int test_to_nullptr() { nullptr_t npt = __builtin_bit_cast(nullptr_t, 0ul); struct indet_mem { unsigned char data[sizeof(void *)]; }; indet_mem im = __builtin_bit_cast(indet_mem, nullptr); nullptr_t npt2 = __builtin_bit_cast(nullptr_t, im); return 0; } constexpr int ttn = test_to_nullptr(); // expected-warning@+2 {{returning reference to local temporary object}} // expected-note@+1 {{temporary created here}} constexpr const long &returns_local() { return 0L; } // expected-error@+2 {{constexpr variable 'test_nullptr_bad' must be initialized by a constant expression}} // expected-note@+1 {{read of temporary whose lifetime has ended}} constexpr nullptr_t test_nullptr_bad = __builtin_bit_cast(nullptr_t, returns_local()); constexpr int test_indeterminate(bool read_indet) { struct pad { char a; int b; }; struct no_pad { char a; unsigned char p1, p2, p3; int b; }; pad p{1, 2}; no_pad np = bit_cast<no_pad>(p); int tmp = np.a + np.b; unsigned char& indet_ref = np.p1; if (read_indet) { // expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}} tmp = indet_ref; } indet_ref = 0; return 0; } constexpr int run_test_indeterminate = test_indeterminate(false); // expected-error@+2 {{constexpr variable 'run_test_indeterminate2' must be initialized by a constant expression}} // expected-note@+1 {{in call to 'test_indeterminate(true)'}} constexpr int run_test_indeterminate2 = test_indeterminate(true); struct ref_mem { const int &rm; }; constexpr int global_int = 0; // expected-error@+2 {{constexpr variable 'run_ref_mem' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a type with a reference member is not allowed in a constant expression}} constexpr unsigned long run_ref_mem = __builtin_bit_cast( unsigned long, ref_mem{global_int}); union u { int im; }; // expected-error@+2 {{constexpr variable 'run_u' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a union type is not allowed in a constant expression}} constexpr int run_u = __builtin_bit_cast(int, u{32}); struct vol_mem { volatile int x; }; // expected-error@+2 {{constexpr variable 'run_vol_mem' must be initialized by a constant expression}} // expected-note@+1 {{non-literal type 'vol_mem' cannot be used in a constant expression}} constexpr int run_vol_mem = __builtin_bit_cast(int, vol_mem{43}); struct mem_ptr { int vol_mem::*x; // expected-note{{invalid type 'int vol_mem::*' is a member of 'mem_ptr'}} }; // expected-error@+2 
{{constexpr variable 'run_mem_ptr' must be initialized by a constant expression}} // expected-note@+1 {{bit_cast from a member pointer type is not allowed in a constant expression}} constexpr int run_mem_ptr = __builtin_bit_cast(unsigned long, mem_ptr{nullptr}); struct A { char c; /* char padding : 8; */ short s; }; struct B { unsigned char x[4]; }; constexpr B one() { A a = {1, 2}; return bit_cast<B>(a); } constexpr char good_one = one().x[0] + one().x[2] + one().x[3]; // expected-error@+2 {{constexpr variable 'bad_one' must be initialized by a constant expression}} // expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}} constexpr char bad_one = one().x[1]; constexpr A two() { B b = one(); // b.x[1] is indeterminate. b.x[0] = 'a'; b.x[2] = 1; b.x[3] = 2; return bit_cast<A>(b); } constexpr short good_two = two().c + two().s; namespace std { enum byte : unsigned char {}; } enum my_byte : unsigned char {}; struct pad { char a; int b; }; constexpr int ok_byte = (__builtin_bit_cast(std::byte[8], pad{1, 2}), 0); constexpr int ok_uchar = (__builtin_bit_cast(unsigned char[8], pad{1, 2}), 0); #ifdef __CHAR_UNSIGNED__ // expected-note@+5 {{indeterminate value can only initialize an object of type 'unsigned char', 'char', or 'std::byte'; 'my_byte' is invalid}}}} #else // expected-note@+3 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'my_byte' is invalid}} #endif // expected-error@+1 {{constexpr variable 'bad_my_byte' must be initialized by a constant expression}} constexpr int bad_my_byte = (__builtin_bit_cast(my_byte[8], pad{1, 2}), 0); #ifndef __CHAR_UNSIGNED__ // expected-error@+3 {{constexpr variable 'bad_char' must be initialized by a constant expression}} // expected-note@+2 {{indeterminate value can only initialize an object of type 'unsigned char' or 'std::byte'; 'char' is invalid}} #endif constexpr int bad_char = (__builtin_bit_cast(char[8], pad{1, 2}), 0); struct pad_buffer { unsigned char data[sizeof(pad)]; }; constexpr bool test_pad_buffer() { pad x = {1, 2}; pad_buffer y = __builtin_bit_cast(pad_buffer, x); pad z = __builtin_bit_cast(pad, y); return x.a == z.a && x.b == z.b; } static_assert(test_pad_buffer()); constexpr unsigned char identity1a = 42; constexpr unsigned char identity1b = __builtin_bit_cast(unsigned char, identity1a); static_assert(identity1b == 42); struct IdentityInStruct { unsigned char n; }; constexpr IdentityInStruct identity2a = {42}; constexpr unsigned char identity2b = __builtin_bit_cast(unsigned char, identity2a.n); union IdentityInUnion { unsigned char n; }; constexpr IdentityInUnion identity3a = {42}; constexpr unsigned char identity3b = __builtin_bit_cast(unsigned char, identity3a.n); namespace test_bool { constexpr bool test_bad_bool = bit_cast<bool>('A'); // expected-error {{must be initialized by a constant expression}} expected-note{{in call}} static_assert(round_trip<signed char>(true), ""); static_assert(round_trip<unsigned char>(false), ""); static_assert(round_trip<bool>(false), ""); static_assert(round_trip<bool>((char)0), ""); static_assert(round_trip<bool>((char)1), ""); } namespace test_long_double { #ifdef __x86_64 constexpr __int128_t test_cast_to_int128 = bit_cast<__int128_t>((long double)0); // expected-error{{must be initialized by a constant expression}} expected-note{{in call}} constexpr long double ld = 3.1425926539; struct bytes { unsigned char d[16]; }; static_assert(round_trip<bytes>(ld), ""); static_assert(round_trip<long double>(10.0L)); constexpr bool 
f(bool read_uninit) { bytes b = bit_cast<bytes>(ld); unsigned char ld_bytes[10] = { 0x0, 0x48, 0x9f, 0x49, 0xf0, 0x3c, 0x20, 0xc9, 0x0, 0x40, }; for (int i = 0; i != 10; ++i) if (ld_bytes[i] != b.d[i]) return false; if (read_uninit && b.d[10]) // expected-note{{read of uninitialized object is not allowed in a constant expression}} return false; return true; } static_assert(f(/*read_uninit=*/false), ""); static_assert(f(/*read_uninit=*/true), ""); // expected-error{{static_assert expression is not an integral constant expression}} expected-note{{in call to 'f(true)'}} constexpr bytes ld539 = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x86, 0x8, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, }; constexpr long double fivehundredandthirtynine = 539.0; static_assert(bit_cast<long double>(ld539) == fivehundredandthirtynine, ""); #else static_assert(round_trip<__int128_t>(34.0L)); #endif }<|fim▁end|>
unsigned doublez; }; struct tuple4 {
<|file_name|>parser.py<|end_file_name|><|fim▁begin|>import numpy as np import re<|fim▁hole|> ''' takes the filename and returns the list [[[pt1.x,pt1.y],...],[block2],...] ''' f=open(filename) block=False lastlist=[] listone=[] for i in f: print(i) if(re.match(r"[\d\.]+e[\+\-][\d]+\t[\d\.]+e[\+\-][\d]+", i)): print("entered") block=True slast=re.findall(r"[\d\.]+e[\+\-][\d]+", i) lastlist.append(list(map(float, slast))) else: if(block): listone.append(lastlist) lastlist=[] block=False return listone print("Test...") l=GetBlocks(r'C:\Users\silvanamorreale\Documents\GitHub\Lab3.2\parser test.txt') print(l)<|fim▁end|>
import os def GetBlocks(filename):
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # # grmpy documentation build configuration file, created by # sphinx-quickstart on Fri Aug 18 13:05:32 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys # Set variable so that todos are shown in local build on_rtd = os.environ.get("READTHEDOCS") == "True" # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.mathjax", "sphinx.ext.ifconfig", "sphinx.ext.viewcode", "sphinxcontrib.bibtex", "sphinx.ext.imgconverter", ] bibtex_bibfiles = ["source/refs.bib"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "grmpy" copyright_ = "2018, grmpy-dev team" author = "grmpy-dev team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "1.0" # The full version, including alpha/beta/rc tags. release = "1.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. We # want to supress the output on readthedocs. if on_rtd: todo_include_todos = False else: todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes.<|fim▁hole|># html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "grmpydoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # "pointsize": "12pt", # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # "figure_align": "htbp", } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "grmpy.tex", "grmpy Documentation", "Development Team", "manual") ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "grmpy", "grmpy Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "grmpy", "grmpy Documentation", author, "grmpy", "One line description of project.", "Miscellaneous", ) ] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright_ # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ["search.html"]<|fim▁end|>
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.db import models from model_utils import Choices from model_utils.models import TimeStampedModel class History(TimeStampedModel): RESOLUTIONS = Choices('second', 'minute', 'hour', 'day', 'week', 'month', 'year') resolution = models.CharField(choices=RESOLUTIONS, default=RESOLUTIONS.day, max_length=6) tag = models.SlugField() datetime = models.DateTimeField() source_type = models.ForeignKey(ContentType) source_id = models.PositiveIntegerField(blank=True, null=True) source_object = GenericForeignKey('source_type', 'source_id') sum = models.IntegerField(default=0) delta = models.IntegerField(default=0) class Meta: get_latest_by = 'datetime' verbose_name_plural = 'histories' def __unicode__(self): return u'%s' % (self.tag) def save(self, *args, **kwargs): try:<|fim▁hole|> pass else: self.delta = self.sum - previous.sum super(History, self).save(*args, **kwargs)<|fim▁end|>
filters = {'resolution': self.resolution, 'tag': self.tag} previous = self._default_manager.filter(**filters).latest() except self._meta.model.DoesNotExist:
<|file_name|>panic_in_result_fn_assertions.rs<|end_file_name|><|fim▁begin|>#![warn(clippy::panic_in_result_fn)] #![allow(clippy::unnecessary_wraps)] struct A; impl A { fn result_with_assert_with_message(x: i32) -> Result<bool, String> // should emit lint { assert!(x == 5, "wrong argument"); Ok(true) } fn result_with_assert_eq(x: i32) -> Result<bool, String> // should emit lint { assert_eq!(x, 5); Ok(true) } fn result_with_assert_ne(x: i32) -> Result<bool, String> // should emit lint { assert_ne!(x, 1); Ok(true) } fn other_with_assert_with_message(x: i32) // should not emit lint {<|fim▁hole|> fn other_with_assert_eq(x: i32) // should not emit lint { assert_eq!(x, 5); } fn other_with_assert_ne(x: i32) // should not emit lint { assert_ne!(x, 1); } fn result_without_banned_functions() -> Result<bool, String> // should not emit lint { let assert = "assert!"; println!("No {}", assert); Ok(true) } } fn main() {}<|fim▁end|>
assert!(x == 5, "wrong argument"); }
<|file_name|>ArUrg.java<|end_file_name|><|fim▁begin|>/* Adept MobileRobots Robotics Interface for Applications (ARIA) Copyright (C) 2004-2005 ActivMedia Robotics LLC Copyright (C) 2006-2010 MobileRobots Inc. Copyright (C) 2011-2014 Adept Technology This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA If you wish to redistribute ARIA under different terms, contact Adept MobileRobots for information about a commercial version of ARIA at [email protected] or <|fim▁hole|>Adept MobileRobots, 10 Columbia Drive, Amherst, NH 03031; +1-603-881-7960 */ /* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 1.3.40 * * Do not make changes to this file unless you know what you are doing--modify * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ package com.mobilerobots.Aria; public class ArUrg extends ArLaser { /* (begin code from javabody_derived typemap) */ private long swigCPtr; /* for internal use by swig only */ public ArUrg(long cPtr, boolean cMemoryOwn) { super(AriaJavaJNI.SWIGArUrgUpcast(cPtr), cMemoryOwn); swigCPtr = cPtr; } /* for internal use by swig only */ public static long getCPtr(ArUrg obj) { return (obj == null) ? 0 : obj.swigCPtr; } /* (end code from javabody_derived typemap) */ protected void finalize() { delete(); } public synchronized void delete() { if (swigCPtr != 0) { if (swigCMemOwn) { swigCMemOwn = false; AriaJavaJNI.delete_ArUrg(swigCPtr); } swigCPtr = 0; } super.delete(); } public ArUrg(int laserNumber, String name) { this(AriaJavaJNI.new_ArUrg__SWIG_0(laserNumber, name), true); } public ArUrg(int laserNumber) { this(AriaJavaJNI.new_ArUrg__SWIG_1(laserNumber), true); } public boolean blockingConnect() { return AriaJavaJNI.ArUrg_blockingConnect(swigCPtr, this); } public boolean asyncConnect() { return AriaJavaJNI.ArUrg_asyncConnect(swigCPtr, this); } public boolean disconnect() { return AriaJavaJNI.ArUrg_disconnect(swigCPtr, this); } public boolean isConnected() { return AriaJavaJNI.ArUrg_isConnected(swigCPtr, this); } public boolean isTryingToConnect() { return AriaJavaJNI.ArUrg_isTryingToConnect(swigCPtr, this); } public void log() { AriaJavaJNI.ArUrg_log(swigCPtr, this); } }<|fim▁end|>
<|file_name|>constant-in-match-pattern.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // min-lldb-version: 310 // compile-flags:-g #![allow(dead_code, unused_variables)] #![omit_gdb_pretty_printer_section] // This test makes sure that the compiler doesn't crash when trying to assign<|fim▁hole|> const CONSTANT: u64 = 3; struct Struct { a: isize, b: usize, } const STRUCT: Struct = Struct { a: 1, b: 2 }; struct TupleStruct(u32); const TUPLE_STRUCT: TupleStruct = TupleStruct(4); enum Enum { Variant1(char), Variant2 { a: u8 }, Variant3 } const VARIANT1: Enum = Enum::Variant1('v'); const VARIANT2: Enum = Enum::Variant2 { a: 2 }; const VARIANT3: Enum = Enum::Variant3; const STRING: &'static str = "String"; fn main() { match 1 { CONSTANT => {} _ => {} }; // if let 3 = CONSTANT {} match (Struct { a: 2, b: 2 }) { STRUCT => {} _ => {} }; // if let STRUCT = STRUCT {} match TupleStruct(3) { TUPLE_STRUCT => {} _ => {} }; // if let TupleStruct(4) = TUPLE_STRUCT {} match VARIANT3 { VARIANT1 => {}, VARIANT2 => {}, VARIANT3 => {}, _ => {} }; match (VARIANT3, VARIANT2) { (VARIANT1, VARIANT3) => {}, (VARIANT2, VARIANT2) => {}, (VARIANT3, VARIANT1) => {}, _ => {} }; // if let VARIANT1 = Enum::Variant3 {} // if let VARIANT2 = Enum::Variant3 {} // if let VARIANT3 = Enum::Variant3 {} match "abc" { STRING => {}, _ => {} } if let STRING = "def" {} }<|fim▁end|>
// debug locations to 'constant' patterns in match expressions.
<|file_name|>sha2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! This module implements only the Sha256 function since that is all that is needed for internal //! use. This implementation is not intended for external use or for any use where security is //! important. #![allow(deprecated)] // to_be32 use std::iter::{range_step, repeat}; use std::num::Int; use std::slice::bytes::{MutableByteVector, copy_memory}; use serialize::hex::ToHex; /// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian /// format. fn write_u32_be(dst: &mut[u8], input: u32) { dst[0] = (input >> 24) as u8; dst[1] = (input >> 16) as u8; dst[2] = (input >> 8) as u8; dst[3] = input as u8; } /// Read the value of a vector of bytes as a u32 value in big-endian format. fn read_u32_be(input: &[u8]) -> u32 { return (input[0] as u32) << 24 | (input[1] as u32) << 16 | (input[2] as u32) << 8 | (input[3] as u32); } /// Read a vector of bytes into a vector of u32s. The values are read in big-endian format. fn read_u32v_be(dst: &mut[u32], input: &[u8]) { assert!(dst.len() * 4 == input.len()); let mut pos = 0u; for chunk in input.chunks(4) { dst[pos] = read_u32_be(chunk); pos += 1; } } trait ToBits { /// Convert the value in bytes to the number of bits, a tuple where the 1st item is the /// high-order value and the 2nd item is the low order value. fn to_bits(self) -> (Self, Self); } impl ToBits for u64 { fn to_bits(self) -> (u64, u64) { return (self >> 61, self << 3); } } /// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric /// overflow. fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T { let (new_high_bits, new_low_bits) = bytes.to_bits(); if new_high_bits > Int::zero() { panic!("numeric overflow occurred.") } match bits.checked_add(new_low_bits) { Some(x) => return x, None => panic!("numeric overflow occurred.") } } /// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it /// must be processed. The input() method takes care of processing and then clearing the buffer /// automatically. However, other methods do not and require the caller to process the buffer. Any /// method that modifies the buffer directory or provides the caller with bytes that can be modified /// results in those bytes being marked as used by the buffer. trait FixedBuffer { /// Input a vector of bytes. If the buffer becomes full, process it with the provided /// function and then clear the buffer. fn input<F>(&mut self, input: &[u8], func: F) where F: FnMut(&[u8]); /// Reset the buffer. fn reset(&mut self); /// Zero the buffer up until the specified index. The buffer position currently must not be /// greater than that index. fn zero_until(&mut self, idx: uint); /// Get a slice of the buffer of the specified size. There must be at least that many bytes /// remaining in the buffer. fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8]; /// Get the current buffer. The buffer must already be full. This clears the buffer as well. 
fn full_buffer<'s>(&'s mut self) -> &'s [u8]; /// Get the current position of the buffer. fn position(&self) -> uint; /// Get the number of bytes remaining in the buffer until it is full. fn remaining(&self) -> uint; /// Get the size of the buffer fn size(&self) -> uint; } /// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize. struct FixedBuffer64 { buffer: [u8; 64], buffer_idx: uint, } impl FixedBuffer64 { /// Create a new FixedBuffer64 fn new() -> FixedBuffer64 { return FixedBuffer64 { buffer: [0u8; 64], buffer_idx: 0 }; } } impl FixedBuffer for FixedBuffer64 { fn input<F>(&mut self, input: &[u8], mut func: F) where F: FnMut(&[u8]), { let mut i = 0; let size = self.size(); // If there is already data in the buffer, copy as much as we can into it and process // the data if the buffer becomes full. if self.buffer_idx != 0 { let buffer_remaining = size - self.buffer_idx; if input.len() >= buffer_remaining { copy_memory( self.buffer.slice_mut(self.buffer_idx, size), &input[..buffer_remaining]); self.buffer_idx = 0; func(&self.buffer); i += buffer_remaining; } else { copy_memory( self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()), input); self.buffer_idx += input.len(); return; } } // While we have at least a full buffer size chunk's worth of data, process that data // without copying it into the buffer while input.len() - i >= size { func(&input[i..i + size]); i += size; } // Copy any input data into the buffer. At this point in the method, the amount of // data left in the input vector will be less than the buffer size and the buffer will // be empty. let input_remaining = input.len() - i; copy_memory( self.buffer.slice_to_mut(input_remaining), &input[i..]); self.buffer_idx += input_remaining; } fn reset(&mut self) { self.buffer_idx = 0; } fn zero_until(&mut self, idx: uint) { assert!(idx >= self.buffer_idx); self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0); self.buffer_idx = idx; } fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] { self.buffer_idx += len; return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx); } fn full_buffer<'s>(&'s mut self) -> &'s [u8] { assert!(self.buffer_idx == 64); self.buffer_idx = 0; return &self.buffer[..64]; } fn position(&self) -> uint { self.buffer_idx } fn remaining(&self) -> uint { 64 - self.buffer_idx } fn size(&self) -> uint { 64 } } /// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct. trait StandardPadding { /// Add padding to the buffer. The buffer must not be full when this method is called and is /// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least /// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled /// with zeros again until only rem bytes are remaining. fn standard_padding<F>(&mut self, rem: uint, func: F) where F: FnMut(&[u8]); } impl <T: FixedBuffer> StandardPadding for T { fn standard_padding<F>(&mut self, rem: uint, mut func: F) where F: FnMut(&[u8]) { let size = self.size(); self.next(1)[0] = 128; if self.remaining() < rem { self.zero_until(size); func(self.full_buffer()); } self.zero_until(size - rem); } } /// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2 /// family of digest functions. pub trait Digest { /// Provide message data. /// /// # Arguments /// /// * input - A vector of message data fn input(&mut self, input: &[u8]); /// Retrieve the digest result. This method may be called multiple times. 
/// /// # Arguments /// /// * out - the vector to hold the result. Must be large enough to contain output_bits(). fn result(&mut self, out: &mut [u8]); /// Reset the digest. This method must be called after result() and before supplying more /// data. fn reset(&mut self); /// Get the output size in bits. fn output_bits(&self) -> uint; /// Convenience function that feeds a string into a digest. /// /// # Arguments /// /// * `input` The string to feed into the digest fn input_str(&mut self, input: &str) { self.input(input.as_bytes()); } /// Convenience function that retrieves the result of a digest as a /// newly allocated vec of bytes. fn result_bytes(&mut self) -> Vec<u8> { let mut buf: Vec<u8> = repeat(0u8).take((self.output_bits()+7)/8).collect(); self.result(buf.as_mut_slice()); buf } /// Convenience function that retrieves the result of a digest as a /// String in hexadecimal format. fn result_str(&mut self) -> String { self.result_bytes().to_hex().to_string() } } // A structure that represents that state of a digest computation for the SHA-2 512 family of digest // functions struct Engine256State { h0: u32, h1: u32, h2: u32, h3: u32, h4: u32, h5: u32, h6: u32, h7: u32, } impl Engine256State { fn new(h: &[u32; 8]) -> Engine256State { return Engine256State { h0: h[0], h1: h[1], h2: h[2], h3: h[3], h4: h[4], h5: h[5], h6: h[6], h7: h[7] }; } fn reset(&mut self, h: &[u32; 8]) { self.h0 = h[0]; self.h1 = h[1]; self.h2 = h[2]; self.h3 = h[3]; self.h4 = h[4]; self.h5 = h[5]; self.h6 = h[6]; self.h7 = h[7]; } fn process_block(&mut self, data: &[u8]) { fn ch(x: u32, y: u32, z: u32) -> u32 { ((x & y) ^ ((!x) & z)) } fn maj(x: u32, y: u32, z: u32) -> u32 { ((x & y) ^ (x & z) ^ (y & z)) } fn sum0(x: u32) -> u32 { ((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10)) } fn sum1(x: u32) -> u32 { ((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7)) } fn sigma0(x: u32) -> u32 { ((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3) } fn sigma1(x: u32) -> u32 { ((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10) } let mut a = self.h0; let mut b = self.h1; let mut c = self.h2; let mut d = self.h3; let mut e = self.h4; let mut f = self.h5; let mut g = self.h6; let mut h = self.h7; let mut w = [0u32; 64]; // Sha-512 and Sha-256 use basically the same calculations which are implemented // by these macros. Inlining the calculations seems to result in better generated code. macro_rules! schedule_round { ($t:expr) => ( w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16]; ) } macro_rules! sha2_round { ($A:ident, $B:ident, $C:ident, $D:ident, $E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => ( { $H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t]; $D += $H; $H += sum0($A) + maj($A, $B, $C); } ) } read_u32v_be(w.slice_mut(0, 16), data); // Putting the message schedule inside the same loop as the round calculations allows for // the compiler to generate better code. 
for t in range_step(0u, 48, 8) { schedule_round!(t + 16); schedule_round!(t + 17); schedule_round!(t + 18); schedule_round!(t + 19); schedule_round!(t + 20); schedule_round!(t + 21); schedule_round!(t + 22); schedule_round!(t + 23); sha2_round!(a, b, c, d, e, f, g, h, K32, t); sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); } for t in range_step(48u, 64, 8) { sha2_round!(a, b, c, d, e, f, g, h, K32, t); sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); } self.h0 += a; self.h1 += b; self.h2 += c; self.h3 += d; self.h4 += e; self.h5 += f; self.h6 += g; self.h7 += h; } } static K32: [u32; 64] = [ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 ]; // A structure that keeps track of the state of the Sha-256 operation and contains the logic // necessary to perform the final calculations. struct Engine256 { length_bits: u64, buffer: FixedBuffer64, state: Engine256State, finished: bool, } impl Engine256 { fn new(h: &[u32; 8]) -> Engine256 { return Engine256 { length_bits: 0, buffer: FixedBuffer64::new(), state: Engine256State::new(h), finished: false } } fn reset(&mut self, h: &[u32; 8]) { self.length_bits = 0; self.buffer.reset(); self.state.reset(h); self.finished = false; } fn input(&mut self, input: &[u8]) { assert!(!self.finished); // Assumes that input.len() can be converted to u64 without overflow self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64); let self_state = &mut self.state; self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) }); } fn finish(&mut self) { if self.finished { return; } let self_state = &mut self.state; self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) }); write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 ); write_u32_be(self.buffer.next(4), self.length_bits as u32); self_state.process_block(self.buffer.full_buffer()); self.finished = true; } } /// The SHA-256 hash algorithm pub struct Sha256 { engine: Engine256 } impl Sha256 { /// Construct a new instance of a SHA-256 digest. 
pub fn new() -> Sha256 { Sha256 { engine: Engine256::new(&H256) } } } impl Digest for Sha256 { fn input(&mut self, d: &[u8]) { self.engine.input(d); } fn result(&mut self, out: &mut [u8]) { self.engine.finish(); write_u32_be(out.slice_mut(0, 4), self.engine.state.h0); write_u32_be(out.slice_mut(4, 8), self.engine.state.h1); write_u32_be(out.slice_mut(8, 12), self.engine.state.h2); write_u32_be(out.slice_mut(12, 16), self.engine.state.h3); write_u32_be(out.slice_mut(16, 20), self.engine.state.h4); write_u32_be(out.slice_mut(20, 24), self.engine.state.h5); write_u32_be(out.slice_mut(24, 28), self.engine.state.h6); write_u32_be(out.slice_mut(28, 32), self.engine.state.h7); } fn reset(&mut self) { self.engine.reset(&H256); } fn output_bits(&self) -> uint { 256 } } static H256: [u32; 8] = [ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 ]; #[cfg(test)] mod tests { extern crate rand; use self::rand::Rng; use self::rand::isaac::IsaacRng; use serialize::hex::FromHex; use std::iter::repeat; use std::num::Int; use super::{Digest, Sha256, FixedBuffer}; // A normal addition - no overflow occurs #[test] fn test_add_bytes_to_bits_ok() { assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180); } // A simple failure case - adding 1 to the max value #[test] #[should_fail] fn test_add_bytes_to_bits_overflow() { super::add_bytes_to_bits::<u64>(Int::max_value(), 1); } struct Test { input: String, output_str: String, } fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) { // Test that it works when accepting the message all at once for t in tests.iter() { sh.reset(); sh.input_str(t.input.as_slice()); let out_str = sh.result_str(); assert!(out_str == t.output_str); } // Test that it works when accepting the message in pieces for t in tests.iter() { sh.reset(); let len = t.input.len(); let mut left = len; while left > 0u { let take = (left + 1u) / 2u; sh.input_str(t.input .slice(len - left, take + len - left)); left = left - take; } let out_str = sh.result_str(); assert!(out_str == t.output_str); } } #[test] fn test_sha256() { // Examples from wikipedia let wikipedia_tests = vec!( Test { input: "".to_string(), output_str: "e3b0c44298fc1c149afb\ f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()<|fim▁hole|> output_str: "d7a8fbb307d7809469ca\ 9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string() }, Test { input: "The quick brown fox jumps over the lazy \ dog.".to_string(), output_str: "ef537f25c895bfa78252\ 6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string() }); let tests = wikipedia_tests; let mut sh = box Sha256::new(); test_hash(&mut *sh, tests.as_slice()); } /// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is /// correct. 
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: uint, expected: &str) { let total_size = 1000000; let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect(); let mut rng = IsaacRng::new_unseeded(); let mut count = 0; digest.reset(); while count < total_size { let next: uint = rng.gen_range(0, 2 * blocksize + 1); let remaining = total_size - count; let size = if next > remaining { remaining } else { next }; digest.input(buffer.slice_to(size)); count += size; } let result_str = digest.result_str(); let result_bytes = digest.result_bytes(); assert_eq!(expected, result_str.as_slice()); let expected_vec: Vec<u8> = expected.from_hex() .unwrap() .into_iter() .collect(); assert_eq!(expected_vec, result_bytes); } #[test] fn test_1million_random_sha256() { let mut sh = Sha256::new(); test_digest_1million_random( &mut sh, 64, "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0"); } } #[cfg(test)] mod bench { extern crate test; use self::test::Bencher; use super::{Sha256, FixedBuffer, Digest}; #[bench] pub fn sha256_10(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1u8; 10]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } #[bench] pub fn sha256_1k(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1u8; 1024]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } #[bench] pub fn sha256_64k(b: &mut Bencher) { let mut sh = Sha256::new(); let bytes = [1u8; 65536]; b.iter(|| { sh.input(&bytes); }); b.bytes = bytes.len() as u64; } }<|fim▁end|>
}, Test { input: "The quick brown fox jumps over the lazy \ dog".to_string(),
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault('DJANGO_SETTINGS_MODULE',<|fim▁hole|> 'video_gallery.tests.south_settings') from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)<|fim▁end|>
<|file_name|>test_raft_paper.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::test_raft::*; use kvproto::eraftpb::*; use raft::*; use raft::storage::MemStorage; use protobuf::RepeatedField; pub fn hard_state(t: u64, c: u64, v: u64) -> HardState { let mut hs = HardState::new(); hs.set_term(t); hs.set_commit(c); hs.set_vote(v); hs } fn commit_noop_entry(r: &mut Interface, s: &MemStorage) { assert_eq!(r.state, StateRole::Leader); r.bcast_append(); // simulate the response of MsgAppend let msgs = r.read_messages(); for m in msgs { assert_eq!(m.get_msg_type(), MessageType::MsgAppend); assert_eq!(m.get_entries().len(), 1); assert!(m.get_entries()[0].get_data().is_empty()); r.step(accept_and_reply(m)).expect(""); } // ignore further messages to refresh followers' commit index r.read_messages(); s.wl() .append(r.raft_log.unstable_entries().unwrap_or(&[])) .expect(""); let committed = r.raft_log.committed; r.raft_log.applied_to(committed); let (last_index, last_term) = (r.raft_log.last_index(), r.raft_log.last_term()); r.raft_log.stable_to(last_index, last_term); } fn accept_and_reply(m: Message) -> Message { assert_eq!(m.get_msg_type(), MessageType::MsgAppend); let mut reply = new_message(m.get_to(), m.get_from(), MessageType::MsgAppendResponse, 0); reply.set_term(m.get_term()); reply.set_index(m.get_index() + m.get_entries().len() as u64); reply } #[test] fn test_follower_update_term_from_message() { test_update_term_from_message(StateRole::Follower); } #[test] fn test_candidate_update_term_from_message() { test_update_term_from_message(StateRole::Candidate); } #[test] fn test_leader_update_term_from_message() { test_update_term_from_message(StateRole::Leader); } // test_update_term_from_message tests that if one server’s current term is // smaller than the other’s, then it updates its current term to the larger // value. If a candidate or leader discovers that its term is out of date, // it immediately reverts to follower state. 
// Reference: section 5.1 fn test_update_term_from_message(state: StateRole) { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); match state { StateRole::Follower => r.become_follower(1, 2), StateRole::PreCandidate => r.become_pre_candidate(), StateRole::Candidate => r.become_candidate(), StateRole::Leader => { r.become_candidate(); r.become_leader(); } } let mut m = new_message(0, 0, MessageType::MsgAppend, 0); m.set_term(2); r.step(m).expect(""); assert_eq!(r.term, 2); assert_eq!(r.state, StateRole::Follower); } // test_reject_stale_term_message tests that if a server receives a request with // a stale term number, it rejects the request. // Our implementation ignores the request instead. // Reference: section 5.1 #[test] fn test_reject_stale_term_message() { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); let panic_before_step_state = Box::new(|_: &Message| { panic!("before step state function hook called unexpectedly") }); r.before_step_state = Some(panic_before_step_state); r.load_state(hard_state(2, 0, 0)); let mut m = new_message(0, 0, MessageType::MsgAppend, 0); m.set_term(r.term - 1); r.step(m).expect(""); } // test_start_as_follower tests that when servers start up, they begin as followers. // Reference: section 5.2 #[test] fn test_start_as_follower() { let r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); assert_eq!(r.state, StateRole::Follower); } // test_leader_bcast_beat tests that if the leader receives a heartbeat tick, // it will send a msgApp with m.Index = 0, m.LogTerm=0 and empty entries as // heartbeat to all followers. // Reference: section 5.2 #[test] fn test_leader_bcast_beat() { // heartbeat interval let hi = 1; let mut r = new_test_raft(1, vec![1, 2, 3], 10, hi, new_storage()); r.become_candidate(); r.become_leader(); for i in 0..10 { r.append_entry(&mut [empty_entry(0, i as u64 + 1)]); } for _ in 0..hi { r.tick(); } let mut msgs = r.read_messages(); msgs.sort_by_key(|m| format!("{:?}", m)); let new_message_ext = |f, to| { let mut m = new_message(f, to, MessageType::MsgHeartbeat, 0); m.set_term(1); m.set_commit(0); m }; let expect_msgs = vec![new_message_ext(1, 2), new_message_ext(1, 3)]; assert_eq!(msgs, expect_msgs); } #[test] fn test_follower_start_election() { test_nonleader_start_election(StateRole::Follower); } #[test] fn test_candidate_start_new_election() { test_nonleader_start_election(StateRole::Candidate); } // test_nonleader_start_election tests that if a follower receives no communication // over election timeout, it begins an election to choose a new leader. It // increments its current term and transitions to candidate state. It then // votes for itself and issues RequestVote RPCs in parallel to each of the // other servers in the cluster. // Reference: section 5.2 // Also if a candidate fails to obtain a majority, it will time out and // start a new election by incrementing its term and initiating another // round of RequestVote RPCs. 
// Reference: section 5.2 fn test_nonleader_start_election(state: StateRole) { // election timeout let et = 10; let mut r = new_test_raft(1, vec![1, 2, 3], et, 1, new_storage()); match state { StateRole::Follower => r.become_follower(1, 2), StateRole::Candidate => r.become_candidate(), _ => panic!("Only non-leader role is accepted."), } for _ in 1..2 * et { r.tick(); } assert_eq!(r.term, 2); assert_eq!(r.state, StateRole::Candidate); assert!(r.votes[&r.id]); let mut msgs = r.read_messages(); msgs.sort_by_key(|m| format!("{:?}", m)); let new_message_ext = |f, to| { let mut m = new_message(f, to, MessageType::MsgRequestVote, 0); m.set_term(2); m.set_log_term(0); m.set_index(0); m }; let expect_msgs = vec![new_message_ext(1, 2), new_message_ext(1, 3)]; assert_eq!(msgs, expect_msgs); } // test_leader_election_in_one_round_rpc tests all cases that may happen in // leader election during one round of RequestVote RPC: // a) it wins the election // b) it loses the election // c) it is unclear about the result // Reference: section 5.2 #[test] fn test_leader_election_in_one_round_rpc() { let mut tests = vec![ // win the election when receiving votes from a majority of the servers (1, map!(), StateRole::Leader), (3, map!(2 => true, 3 => true), StateRole::Leader), (3, map!(2 => true), StateRole::Leader), ( 5, map!(2 => true, 3 => true, 4 => true, 5 => true), StateRole::Leader, ), (5, map!(2 => true, 3 => true, 4 => true), StateRole::Leader), (5, map!(2 => true, 3 => true), StateRole::Leader), // return to follower state if it receives vote denial from a majority (3, map!(2 => false, 3 => false), StateRole::Follower), ( 5, map!(2 => false, 3 => false, 4 => false, 5 => false), StateRole::Follower, ), ( 5, map!(2 => true, 3 => false, 4 => false, 5 => false), StateRole::Follower, ), // stay in candidate if it does not obtain the majority (3, map!(), StateRole::Candidate), (5, map!(2 => true), StateRole::Candidate), (5, map!(2 => false, 3 => false), StateRole::Candidate), (5, map!(), StateRole::Candidate), ]; for (i, (size, votes, state)) in tests.drain(..).enumerate() { let mut r = new_test_raft(1, (1..size as u64 + 1).collect(), 10, 1, new_storage()); r.step(new_message(1, 1, MessageType::MsgHup, 0)).expect(""); for (id, vote) in votes { let mut m = new_message(id, 1, MessageType::MsgRequestVoteResponse, 0); m.set_reject(!vote); r.step(m).expect(""); } if r.state != state { panic!("#{}: state = {:?}, want {:?}", i, r.state, state); } if r.term != 1 { panic!("#{}: term = {}, want {}", i, r.term, 1); } } } // test_follower_vote tests that each follower will vote for at most one // candidate in a given term, on a first-come-first-served basis. 
// Reference: section 5.2 #[test] fn test_follower_vote() { let mut tests = vec![ (INVALID_ID, 1, false), (INVALID_ID, 2, false), (1, 1, false), (2, 2, false), (1, 2, true), (2, 1, true), ]; for (i, (vote, nvote, wreject)) in tests.drain(..).enumerate() { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); r.load_state(hard_state(1, 0, vote)); let mut m = new_message(nvote, 1, MessageType::MsgRequestVote, 0); m.set_term(1); r.step(m).expect(""); let msgs = r.read_messages(); let mut m = new_message(1, nvote, MessageType::MsgRequestVoteResponse, 0); m.set_term(1); m.set_reject(wreject); let expect_msgs = vec![m]; if msgs != expect_msgs { panic!("#{}: msgs = {:?}, want {:?}", i, msgs, expect_msgs); } } } // test_candidate_fallback tests that while waiting for votes, // if a candidate receives an AppendEntries RPC from another server claiming // to be leader whose term is at least as large as the candidate's current term, // it recognizes the leader as legitimate and returns to follower state. // Reference: section 5.2 #[test] fn test_candidate_fallback() { let new_message_ext = |f, to, term| { let mut m = new_message(f, to, MessageType::MsgAppend, 0); m.set_term(term); m }; let mut tests = vec![new_message_ext(2, 1, 1), new_message_ext(2, 1, 2)]; for (i, m) in tests.drain(..).enumerate() { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); r.step(new_message(1, 1, MessageType::MsgHup, 0)).expect(""); assert_eq!(r.state, StateRole::Candidate); let term = m.get_term(); r.step(m).expect(""); if r.state != StateRole::Follower { panic!( "#{}: state = {:?}, want {:?}", i, r.state, StateRole::Follower ); } if r.term != term { panic!("#{}: term = {}, want {}", i, r.term, term); } } } #[test] fn test_follower_election_timeout_randomized() { test_non_leader_election_timeout_randomized(StateRole::Follower); } #[test] fn test_candidate_election_timeout_randomized() { test_non_leader_election_timeout_randomized(StateRole::Candidate); } // test_non_leader_election_timeout_randomized tests that election timeout for // follower or candidate is randomized. // Reference: section 5.2 fn test_non_leader_election_timeout_randomized(state: StateRole) { let et = 10; let mut r = new_test_raft(1, vec![1, 2, 3], et, 1, new_storage()); let mut timeouts = map!(); for _ in 0..1000 * et { let term = r.term; match state { StateRole::Follower => r.become_follower(term + 1, 2), StateRole::Candidate => r.become_candidate(), _ => panic!("only non leader state is accepted!"), } let mut time = 0; while r.read_messages().is_empty() { r.tick(); time += 1; } timeouts.insert(time, true); } assert!(timeouts.len() <= et && timeouts.len() >= et - 1); for d in et + 1..2 * et { assert!(timeouts[&d]); } } #[test] fn test_follower_election_timeout_nonconflict() { test_nonleaders_election_timeout_nonconfict(StateRole::Follower); } #[test] fn test_acandidates_election_timeout_nonconf() { test_nonleaders_election_timeout_nonconfict(StateRole::Candidate); } // test_nonleaders_election_timeout_nonconfict tests that in most cases only a // single server(follower or candidate) will time out, which reduces the // likelihood of split vote in the new election. 
// Reference: section 5.2 fn test_nonleaders_election_timeout_nonconfict(state: StateRole) { let et = 10; let size = 5; let mut rs = Vec::with_capacity(size); let ids: Vec<u64> = (1..size as u64 + 1).collect(); for id in ids.iter().take(size) { rs.push(new_test_raft(*id, ids.clone(), et, 1, new_storage())); } let mut conflicts = 0; for _ in 0..1000 { for r in &mut rs { let term = r.term; match state { StateRole::Follower => r.become_follower(term + 1, INVALID_ID), StateRole::Candidate => r.become_candidate(), _ => panic!("non leader state is expect!"), } } let mut timeout_num = 0; while timeout_num == 0 { for r in &mut rs { r.tick(); if !r.read_messages().is_empty() { timeout_num += 1; } } } // several rafts time out at the same tick if timeout_num > 1 { conflicts += 1; } } assert!(conflicts as f64 / 1000.0 <= 0.3); } // test_leader_start_replication tests that when receiving client proposals,<|fim▁hole|>// the new entries. // Also, it writes the new entry into stable storage. // Reference: section 5.3 #[test] fn test_leader_start_replication() { let s = new_storage(); let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, s.clone()); r.become_candidate(); r.become_leader(); commit_noop_entry(&mut r, &s); let li = r.raft_log.last_index(); r.step(new_message(1, 1, MessageType::MsgPropose, 1)) .expect(""); assert_eq!(r.raft_log.last_index(), li + 1); assert_eq!(r.raft_log.committed, li); let mut msgs = r.read_messages(); msgs.sort_by_key(|m| format!("{:?}", m)); let wents = vec![new_entry(1, li + 1, SOME_DATA)]; let new_message_ext = |f, to, ents| { let mut m = new_message(f, to, MessageType::MsgAppend, 0); m.set_term(1); m.set_index(li); m.set_log_term(1); m.set_commit(li); m.set_entries(RepeatedField::from_vec(ents)); m }; let expect_msgs = vec![ new_message_ext(1, 2, wents.clone()), new_message_ext(1, 3, wents.clone()), ]; assert_eq!(msgs, expect_msgs); assert_eq!(r.raft_log.unstable_entries(), Some(&*wents)); } // test_leader_commit_entry tests that when the entry has been safely replicated, // the leader gives out the applied entries, which can be applied to its state // machine. // Also, the leader keeps track of the highest index it knows to be committed, // and it includes that index in future AppendEntries RPCs so that the other // servers eventually find out. // Reference: section 5.3 #[test] fn test_leader_commit_entry() { let s = new_storage(); let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, s.clone()); r.become_candidate(); r.become_leader(); commit_noop_entry(&mut r, &s); let li = r.raft_log.last_index(); r.step(new_message(1, 1, MessageType::MsgPropose, 1)) .expect(""); for m in r.read_messages() { r.step(accept_and_reply(m)).expect(""); } assert_eq!(r.raft_log.committed, li + 1); let wents = vec![new_entry(1, li + 1, SOME_DATA)]; assert_eq!(r.raft_log.next_entries(), Some(wents)); let mut msgs = r.read_messages(); msgs.sort_by_key(|m| format!("{:?}", m)); for (i, m) in msgs.drain(..).enumerate() { assert_eq!(i as u64 + 2, m.get_to()); assert_eq!(m.get_msg_type(), MessageType::MsgAppend); assert_eq!(m.get_commit(), li + 1); } } // test_leader_acknowledge_commit tests that a log entry is committed once the // leader that created the entry has replicated it on a majority of the servers. 
// Reference: section 5.3 #[test] fn test_leader_acknowledge_commit() { let mut tests = vec![ (1, map!(), true), (3, map!(), false), (3, map!(2 => true), true), (3, map!(2 => true, 3 => true), true), (5, map!(), false), (5, map!(2 => true), false), (5, map!(2 => true, 3 => true), true), (5, map!(2 => true, 3 => true, 4 => true), true), (5, map!(2 => true, 3 => true, 4 => true, 5 => true), true), ]; for (i, (size, acceptors, wack)) in tests.drain(..).enumerate() { let s = new_storage(); let mut r = new_test_raft(1, (1..size + 1).collect(), 10, 1, s.clone()); r.become_candidate(); r.become_leader(); commit_noop_entry(&mut r, &s); let li = r.raft_log.last_index(); r.step(new_message(1, 1, MessageType::MsgPropose, 1)) .expect(""); for m in r.read_messages() { if acceptors.contains_key(&m.get_to()) && acceptors[&m.get_to()] { r.step(accept_and_reply(m)).expect(""); } } let g = r.raft_log.committed > li; if g ^ wack { panic!("#{}: ack commit = {}, want {}", i, g, wack); } } } // test_leader_commit_preceding_entries tests that when leader commits a log entry, // it also commits all preceding entries in the leader’s log, including // entries created by previous leaders. // Also, it applies the entry to its local state machine (in log order). // Reference: section 5.3 #[test] fn test_leader_commit_preceding_entries() { let mut tests = vec![ vec![], vec![empty_entry(2, 1)], vec![empty_entry(1, 1), empty_entry(2, 2)], vec![empty_entry(1, 1)], ]; for (i, mut tt) in tests.drain(..).enumerate() { let s = new_storage(); s.wl().append(&tt).expect(""); let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, s); r.load_state(hard_state(2, 0, 0)); r.become_candidate(); r.become_leader(); r.step(new_message(1, 1, MessageType::MsgPropose, 1)) .expect(""); for m in r.read_messages() { r.step(accept_and_reply(m)).expect(""); } let li = tt.len() as u64; tt.append(&mut vec![ empty_entry(3, li + 1), new_entry(3, li + 2, SOME_DATA), ]); let g = r.raft_log.next_entries(); let wg = Some(tt); if g != wg { panic!("#{}: ents = {:?}, want {:?}", i, g, wg); } } } // test_follower_commit_entry tests that once a follower learns that a log entry // is committed, it applies the entry to its local state machine (in log order). // Reference: section 5.3 #[test] fn test_follower_commit_entry() { let mut tests = vec![ (vec![new_entry(1, 1, SOME_DATA)], 1), ( vec![ new_entry(1, 1, SOME_DATA), new_entry(1, 2, Some("somedata2")), ], 2, ), ( vec![ new_entry(1, 1, Some("somedata2")), new_entry(1, 2, SOME_DATA), ], 2, ), ( vec![ new_entry(1, 1, SOME_DATA), new_entry(1, 2, Some("somedata2")), ], 1, ), ]; for (i, (ents, commit)) in tests.drain(..).enumerate() { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); r.become_follower(1, 2); let mut m = new_message(2, 1, MessageType::MsgAppend, 0); m.set_term(1); m.set_commit(commit); m.set_entries(RepeatedField::from_vec(ents.clone())); r.step(m).expect(""); if r.raft_log.committed != commit { panic!( "#{}: committed = {}, want {}", i, r.raft_log.committed, commit ); } let wents = Some(ents[..commit as usize].to_vec()); let g = r.raft_log.next_entries(); if g != wents { panic!("#{}: next_ents = {:?}, want {:?}", i, g, wents); } } } // test_follower_check_msg_append tests that if the follower does not find an // entry in its log with the same index and term as the one in AppendEntries RPC, // then it refuses the new entries. Otherwise it replies that it accepts the // append entries. 
// Reference: section 5.3 #[test] fn test_follower_check_msg_append() { let ents = vec![empty_entry(1, 1), empty_entry(2, 2)]; let mut tests = vec![ // match with committed entries (0, 0, 1, false, 0), (ents[0].get_term(), ents[0].get_index(), 1, false, 0), // match with uncommitted entries (ents[1].get_term(), ents[1].get_index(), 2, false, 0), // unmatch with existing entry ( ents[0].get_term(), ents[1].get_index(), ents[1].get_index(), true, 2, ), // unexisting entry ( ents[1].get_term() + 1, ents[1].get_index() + 1, ents[1].get_index() + 1, true, 2, ), ]; for (i, (term, index, windex, wreject, wreject_hint)) in tests.drain(..).enumerate() { let s = new_storage(); s.wl().append(&ents).expect(""); let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, s); r.load_state(hard_state(0, 1, 0)); r.become_follower(2, 2); let mut m = new_message(2, 1, MessageType::MsgAppend, 0); m.set_term(2); m.set_log_term(term); m.set_index(index); r.step(m).expect(""); let msgs = r.read_messages(); let mut wm = new_message(1, 2, MessageType::MsgAppendResponse, 0); wm.set_term(2); wm.set_index(windex); if wreject { wm.set_reject(wreject); wm.set_reject_hint(wreject_hint); } let expect_msgs = vec![wm]; if msgs != expect_msgs { panic!("#{}: msgs = {:?}, want {:?}", i, msgs, expect_msgs); } } } // test_follower_append_entries tests that when AppendEntries RPC is valid, // the follower will delete the existing conflict entry and all that follow it, // and append any new entries not already in the log. // Also, it writes the new entry into stable storage. // Reference: section 5.3 #[test] fn test_follower_append_entries() { let mut tests = vec![ ( 2, 2, vec![empty_entry(3, 3)], vec![empty_entry(1, 1), empty_entry(2, 2), empty_entry(3, 3)], vec![empty_entry(3, 3)], ), ( 1, 1, vec![empty_entry(3, 2), empty_entry(4, 3)], vec![empty_entry(1, 1), empty_entry(3, 2), empty_entry(4, 3)], vec![empty_entry(3, 2), empty_entry(4, 3)], ), ( 0, 0, vec![empty_entry(1, 1)], vec![empty_entry(1, 1), empty_entry(2, 2)], vec![], ), ( 0, 0, vec![empty_entry(3, 1)], vec![empty_entry(3, 1)], vec![empty_entry(3, 1)], ), ]; for (i, (index, term, ents, wents, wunstable)) in tests.drain(..).enumerate() { let s = new_storage(); s.wl() .append(&[empty_entry(1, 1), empty_entry(2, 2)]) .expect(""); let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, s); r.become_follower(2, 2); let mut m = new_message(2, 1, MessageType::MsgAppend, 0); m.set_term(2); m.set_log_term(term); m.set_index(index); m.set_entries(RepeatedField::from_vec(ents)); r.step(m).expect(""); let g = r.raft_log.all_entries(); if g != wents { panic!("#{}: ents = {:?}, want {:?}", i, g, wents); } let g = r.raft_log.unstable_entries(); let wunstable = if wunstable.is_empty() { None } else { Some(&*wunstable) }; if g != wunstable { panic!("#{}: unstable_entries = {:?}, want {:?}", i, g, wunstable); } } } // test_leader_sync_follower_log tests that the leader could bring a follower's log // into consistency with its own. 
// Reference: section 5.3, figure 7 #[test] fn test_leader_sync_follower_log() { let ents = vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), empty_entry(4, 5), empty_entry(5, 6), empty_entry(5, 7), empty_entry(6, 8), empty_entry(6, 9), empty_entry(6, 10), ]; let term = 8u64; let mut tests = vec![ vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), empty_entry(4, 5), empty_entry(5, 6), empty_entry(5, 7), empty_entry(6, 8), empty_entry(6, 9), ], vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), ], vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), empty_entry(4, 5), empty_entry(5, 6), empty_entry(5, 7), empty_entry(6, 8), empty_entry(6, 9), empty_entry(6, 10), empty_entry(6, 11), ], vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), empty_entry(4, 5), empty_entry(5, 6), empty_entry(5, 7), empty_entry(6, 8), empty_entry(6, 9), empty_entry(6, 10), empty_entry(7, 11), empty_entry(7, 12), ], vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(4, 4), empty_entry(4, 5), empty_entry(4, 6), empty_entry(4, 7), ], vec![ empty_entry(0, 0), empty_entry(1, 1), empty_entry(1, 2), empty_entry(1, 3), empty_entry(2, 4), empty_entry(2, 5), empty_entry(2, 6), empty_entry(3, 7), empty_entry(3, 8), empty_entry(3, 9), empty_entry(3, 10), empty_entry(3, 11), ], ]; for (i, tt) in tests.drain(..).enumerate() { let lead_store = new_storage(); lead_store.wl().append(&ents).expect(""); let mut lead = new_test_raft(1, vec![1, 2, 3], 10, 1, lead_store); let last_index = lead.raft_log.last_index(); lead.load_state(hard_state(term, last_index, 0)); let follower_store = new_storage(); follower_store.wl().append(&tt).expect(""); let mut follower = new_test_raft(2, vec![1, 2, 3], 10, 1, follower_store); follower.load_state(hard_state(term - 1, 0, 0)); // It is necessary to have a three-node cluster. // The second may have more up-to-date log than the first one, so the // first node needs the vote from the third node to become the leader. let mut n = Network::new(vec![Some(lead), Some(follower), NOP_STEPPER]); n.send(vec![new_message(1, 1, MessageType::MsgHup, 0)]); // The election occurs in the term after the one we loaded with // lead.load_state above. let mut m = new_message(3, 1, MessageType::MsgRequestVoteResponse, 0); m.set_term(term + 1); n.send(vec![m]); let mut m = new_message(1, 1, MessageType::MsgPropose, 0); m.set_entries(RepeatedField::from_vec(vec![Entry::new()])); n.send(vec![m]); let lead_str = ltoa(&n.peers[&1].raft_log); let follower_str = ltoa(&n.peers[&2].raft_log); if lead_str != follower_str { panic!( "#{}: lead str: {}, follower_str: {}", i, lead_str, follower_str ); } } } // test_vote_request tests that the vote request includes information about the candidate’s log // and are sent to all of the other nodes. 
// Reference: section 5.4.1 #[test] fn test_vote_request() { let mut tests = vec![ (vec![empty_entry(1, 1)], 2), (vec![empty_entry(1, 1), empty_entry(2, 2)], 3), ]; for (j, (ents, wterm)) in tests.drain(..).enumerate() { let mut r = new_test_raft(1, vec![1, 2, 3], 10, 1, new_storage()); let mut m = new_message(2, 1, MessageType::MsgAppend, 0); m.set_term(wterm - 1); m.set_log_term(0); m.set_index(0); m.set_entries(RepeatedField::from_vec(ents.clone())); r.step(m).expect(""); r.read_messages(); for _ in 1..r.get_election_timeout() * 2 { r.tick_election(); } let mut msgs = r.read_messages(); msgs.sort_by_key(|m| format!("{:?}", m)); if msgs.len() != 2 { panic!("#{}: msg count = {}, want 2", j, msgs.len()); } for (i, m) in msgs.iter().enumerate() { if m.get_msg_type() != MessageType::MsgRequestVote { panic!( "#{}.{}: msg_type = {:?}, want {:?}", j, i, m.get_msg_type(), MessageType::MsgRequestVote ); } if m.get_to() != i as u64 + 2 { panic!("#{}.{}: to = {}, want {}", j, i, m.get_to(), i + 2); } if m.get_term() != wterm { panic!("#{}.{}: term = {}, want {}", j, i, m.get_term(), wterm); } let windex = ents.last().unwrap().get_index(); let wlogterm = ents.last().unwrap().get_term(); if m.get_index() != windex { panic!("#{}.{}: index = {}, want {}", j, i, m.get_index(), windex); } if m.get_log_term() != wlogterm { panic!( "#{}.{}: log_term = {}, want {}", j, i, m.get_log_term(), wlogterm ); } } } } // test_voter tests the voter denies its vote if its own log is more up-to-date // than that of the candidate. // Reference: section 5.4.1 #[test] fn test_voter() { let mut tests = vec![ // same logterm (vec![empty_entry(1, 1)], 1, 1, false), (vec![empty_entry(1, 1)], 1, 2, false), (vec![empty_entry(1, 1), empty_entry(1, 2)], 1, 1, true), // candidate higher logterm (vec![empty_entry(1, 1)], 2, 1, false), (vec![empty_entry(1, 1)], 2, 2, false), (vec![empty_entry(1, 1), empty_entry(1, 2)], 2, 1, false), // voter higher logterm (vec![empty_entry(2, 1)], 1, 1, true), (vec![empty_entry(2, 1)], 1, 2, true), (vec![empty_entry(2, 1), empty_entry(1, 2)], 1, 1, true), ]; for (i, (ents, log_term, index, wreject)) in tests.drain(..).enumerate() { let s = new_storage(); s.wl().append(&ents).expect(""); let mut r = new_test_raft(1, vec![1, 2], 10, 1, s); let mut m = new_message(2, 1, MessageType::MsgRequestVote, 0); m.set_term(3); m.set_log_term(log_term); m.set_index(index); r.step(m).expect(""); let msgs = r.read_messages(); if msgs.len() != 1 { panic!("#{}: msg count = {}, want {}", i, msgs.len(), 1); } if msgs[0].get_msg_type() != MessageType::MsgRequestVoteResponse { panic!( "#{}: msg_type = {:?}, want {:?}", i, msgs[0].get_msg_type(), MessageType::MsgRequestVoteResponse ); } if msgs[0].get_reject() != wreject { panic!( "#{}: reject = {}, want {}", i, msgs[0].get_reject(), wreject ); } } } // TestLeaderOnlyCommitsLogFromCurrentTerm tests that only log entries from the leader’s // current term are committed by counting replicas. 
// Reference: section 5.4.2 #[test] fn test_leader_only_commits_log_from_current_term() { let ents = vec![empty_entry(1, 1), empty_entry(2, 2)]; let mut tests = vec![ // do not commit log entries in previous terms (1, 0), (2, 0), // commit log in current term (3, 3), ]; for (i, (index, wcommit)) in tests.drain(..).enumerate() { let store = new_storage(); store.wl().append(&ents).expect(""); let mut r = new_test_raft(1, vec![1, 2], 10, 1, store); r.load_state(hard_state(2, 0, 0)); // become leader at term 3 r.become_candidate(); r.become_leader(); r.read_messages(); // propose a entry to current term r.step(new_message(1, 1, MessageType::MsgPropose, 1)) .expect(""); let mut m = new_message(2, 1, MessageType::MsgAppendResponse, 0); m.set_term(r.term); m.set_index(index); r.step(m).expect(""); if r.raft_log.committed != wcommit { panic!( "#{}: commit = {}, want {}", i, r.raft_log.committed, wcommit ); } } }<|fim▁end|>
// the leader appends the proposal to its log as a new entry, then issues // AppendEntries RPCs in parallel to each of the other servers to replicate // the entry. Also, when sending an AppendEntries RPC, the leader includes // the index and term of the entry in its log that immediately precedes
<|file_name|>AspectRatioUtil.java<|end_file_name|><|fim▁begin|>/* * Copyright 2013, Edmodo, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in compliance with the License. * You may obtain a copy of the License in the LICENSE file, or at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package com.yuqiaotech.erp.editimage.cropper.util; import android.graphics.RectF; import android.support.annotation.NonNull; /** * Utility class for handling calculations involving a fixed aspect ratio. */ public class AspectRatioUtil { /** * Calculates the aspect ratio given a rectangle. */ public static float calculateAspectRatio(float left, float top, float right, float bottom) { final float width = right - left; final float height = bottom - top; return width / height; } /** * Calculates the aspect ratio given a rectangle. */ public static float calculateAspectRatio(@NonNull RectF rect) { return rect.width() / rect.height(); } /** * Calculates the x-coordinate of the left edge given the other sides of the rectangle and an * aspect ratio. */ public static float calculateLeft(float top, float right, float bottom, float targetAspectRatio) { final float height = bottom - top; // targetAspectRatio = width / height // width = targetAspectRatio * height // right - left = targetAspectRatio * height return right - (targetAspectRatio * height); } /** * Calculates the y-coordinate of the top edge given the other sides of the rectangle and an * aspect ratio. */ public static float calculateTop(float left, float right, float bottom, float targetAspectRatio) { final float width = right - left; // targetAspectRatio = width / height // width = targetAspectRatio * height // height = width / targetAspectRatio // bottom - top = width / targetAspectRatio return bottom - (width / targetAspectRatio); } /**<|fim▁hole|> * Calculates the x-coordinate of the right edge given the other sides of the rectangle and an * aspect ratio. */ public static float calculateRight(float left, float top, float bottom, float targetAspectRatio) { final float height = bottom - top; // targetAspectRatio = width / height // width = targetAspectRatio * height // right - left = targetAspectRatio * height return (targetAspectRatio * height) + left; } /** * Calculates the y-coordinate of the bottom edge given the other sides of the rectangle and an * aspect ratio. */ public static float calculateBottom(float left, float top, float right, float targetAspectRatio) { final float width = right - left; // targetAspectRatio = width / height // width = targetAspectRatio * height // height = width / targetAspectRatio // bottom - top = width / targetAspectRatio return (width / targetAspectRatio) + top; } /** * Calculates the width of a rectangle given the top and bottom edges and an aspect ratio. */ public static float calculateWidth(float height, float targetAspectRatio) { return targetAspectRatio * height; } /** * Calculates the height of a rectangle given the left and right edges and an aspect ratio. */ public static float calculateHeight(float width, float targetAspectRatio) { return width / targetAspectRatio; } }<|fim▁end|>
<|file_name|>0007_auto_20160318_2122.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('draw', '0006_auto_20160314_1817'), ] operations = [ migrations.AlterField( model_name='physicalentity', name='display_name', field=models.CharField(blank=True, max_length=1000), ), ]<|fim▁end|>
# Generated by Django 1.9.2 on 2016-03-18 21:22
<|file_name|>TUTableImpl.js<|end_file_name|><|fim▁begin|>/* * Name: TUTableImpl.js * Module: * Location: Norris/test/unit * Date: 2015-05-25 * Version: v1.00 * * History: * * ================================================================================ * Version Date Programmer Changes * ================================================================================ * v1.00 2015-06-15 Carlon Chiara Approved * ================================================================================ * v0.02 2015-06-02 Pavanello Fabio Matteo Verify * ================================================================================ * v0.01 2015-05-25 Bucco Riccardo Creation * ================================================================================ */ var TableImpl = require('../../main/DataModel/NorrisChart/TableImpl.js'); var assert = require("assert"); describe('TableImpl', function(){ describe('TableImpl(id: String)', function(){ it('should memorize the right type of the chart',function(){ var table = new TableImpl('randomID'); assert.equal('table', table.type); }); it('should memorize the right id of the chart',function(){ var table = new TableImpl('randomID'); assert.equal('randomID', table.uid); }); it('should memorize some default values for the keys of the settings',function(){ var table = new TableImpl('randomID'); var defaults = { title: '', description : 'This is a table.',<|fim▁hole|> maxItems : 10 , showTableGrid : true , newLinePosition : 'bottom', allowFilter: false, allowSort: false, pageSize: -1 }; assert.deepEqual(defaults,table.settings); }); }) });<|fim▁end|>
<|file_name|>I2C.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Adafruit Industries # Author: Tony DiCola # Based on Adafruit_I2C.py created by Kevin Townsend. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in<|fim▁hole|># IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import logging import os import subprocess import Adafruit_GPIO.Platform as Platform def reverseByteOrder(data): """DEPRECATED: See https://github.com/adafruit/Adafruit_Python_GPIO/issues/48""" # # Courtesy Vishal Sapre # byteCount = len(hex(data)[2:].replace('L','')[::2]) # val = 0 # for i in range(byteCount): # val = (val << 8) | (data & 0xff) # data >>= 8 # return val raise RuntimeError('reverseByteOrder is deprecated! See: https://github.com/adafruit/Adafruit_Python_GPIO/issues/48') def get_default_bus(): """Return the default bus number based on the device platform. For a Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned. For a Beaglebone Black the first user accessible bus, 1, will be returned. """ plat = Platform.platform_detect() if plat == Platform.RASPBERRY_PI: if Platform.pi_revision() == 1: # Revision 1 Pi uses I2C bus 0. return 0 else: # Revision 2 Pi uses I2C bus 1. return 1 elif plat == Platform.BEAGLEBONE_BLACK: # Beaglebone Black has multiple I2C buses, default to 1 (P9_19 and P9_20). return 1 else: raise RuntimeError('Could not determine default I2C bus for platform.') def get_i2c_device(address, busnum=None, i2c_interface=None, **kwargs): """Return an I2C device for the specified address and on the specified bus. If busnum isn't specified, the default I2C bus for the platform will attempt to be detected. """ if busnum is None: busnum = get_default_bus() return Device(address, busnum, i2c_interface, **kwargs) def require_repeated_start(): """Enable repeated start conditions for I2C register reads. This is the normal behavior for I2C, however on some platforms like the Raspberry Pi there are bugs which disable repeated starts unless explicitly enabled with this function. See this thread for more details: http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840 """ plat = Platform.platform_detect() if plat == Platform.RASPBERRY_PI and os.path.exists('/sys/module/i2c_bcm2708/parameters/combined'): # On the Raspberry Pi there is a bug where register reads don't send a # repeated start condition like the kernel smbus I2C driver functions # define. As a workaround this bit in the BCM2708 driver sysfs tree can # be changed to enable I2C repeated starts. 
subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True) subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True) # Other platforms are a no-op because they (presumably) have the correct # behavior and send repeated starts. class Device(object): """Class for communicating with an I2C device using the adafruit-pureio pure python smbus library, or other smbus compatible I2C interface. Allows reading and writing 8-bit, 16-bit, and byte array values to registers on the device.""" def __init__(self, address, busnum, i2c_interface=None): """Create an instance of the I2C device at the specified address on the specified I2C bus number.""" self._address = address if i2c_interface is None: # Use pure python I2C interface if none is specified. import Adafruit_PureIO.smbus self._bus = Adafruit_PureIO.smbus.SMBus(busnum) else: # Otherwise use the provided class to create an smbus interface. self._bus = i2c_interface(busnum) self._logger = logging.getLogger('Adafruit_I2C.Device.Bus.{0}.Address.{1:#0X}' \ .format(busnum, address)) def writeRaw8(self, value): """Write an 8-bit value on the bus (without register).""" value = value & 0xFF self._bus.write_byte(self._address, value) self._logger.debug("Wrote 0x%02X", value) def write8(self, register, value): """Write an 8-bit value to the specified register.""" value = value & 0xFF self._bus.write_byte_data(self._address, register, value) self._logger.debug("Wrote 0x%02X to register 0x%02X", value, register) def write16(self, register, value): """Write a 16-bit value to the specified register.""" value = value & 0xFFFF self._bus.write_word_data(self._address, register, value) self._logger.debug("Wrote 0x%04X to register pair 0x%02X, 0x%02X", value, register, register+1) def writeList(self, register, data): """Write bytes to the specified register.""" self._bus.write_i2c_block_data(self._address, register, data) self._logger.debug("Wrote to register 0x%02X: %s", register, data) def readList(self, register, length): """Read a length number of bytes from the specified register. Results will be returned as a bytearray.""" results = self._bus.read_i2c_block_data(self._address, register, length) self._logger.debug("Read the following from register 0x%02X: %s", register, results) return results def readRaw8(self): """Read an 8-bit value on the bus (without register).""" result = self._bus.read_byte(self._address) & 0xFF self._logger.debug("Read 0x%02X", result) return result def readU8(self, register): """Read an unsigned byte from the specified register.""" result = self._bus.read_byte_data(self._address, register) & 0xFF self._logger.debug("Read 0x%02X from register 0x%02X", result, register) return result def readS8(self, register): """Read a signed byte from the specified register.""" result = self.readU8(register) if result > 127: result -= 256 return result def readU16(self, register, little_endian=True): """Read an unsigned 16-bit value from the specified register, with the specified endianness (default little endian, or least significant byte first).""" result = self._bus.read_word_data(self._address,register) & 0xFFFF self._logger.debug("Read 0x%04X from register pair 0x%02X, 0x%02X", result, register, register+1) # Swap bytes if using big endian because read_word_data assumes little # endian on ARM (little endian) systems. 
if not little_endian: result = ((result << 8) & 0xFF00) + (result >> 8) return result def readS16(self, register, little_endian=True): """Read a signed 16-bit value from the specified register, with the specified endianness (default little endian, or least significant byte first).""" result = self.readU16(register, little_endian) if result > 32767: result -= 65536 return result def readU16LE(self, register): """Read an unsigned 16-bit value from the specified register, in little endian byte order.""" return self.readU16(register, little_endian=True) def readU16BE(self, register): """Read an unsigned 16-bit value from the specified register, in big endian byte order.""" return self.readU16(register, little_endian=False) def readS16LE(self, register): """Read a signed 16-bit value from the specified register, in little endian byte order.""" return self.readS16(register, little_endian=True) def readS16BE(self, register): """Read a signed 16-bit value from the specified register, in big endian byte order.""" return self.readS16(register, little_endian=False)<|fim▁end|>
# all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
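A minimal usage sketch for the Device class defined in the I2C.py example above, kept outside the prompt/completion pair. The calls shown (get_i2c_device, write8, readU16BE) are the ones defined in that file; the peripheral address 0x48 and the register numbers are purely illustrative assumptions and are not part of the original example.

# Hypothetical usage of the Adafruit_GPIO.I2C Device class shown above.
import Adafruit_GPIO.I2C as I2C

# Open a device on the platform's default bus (bus 0 or 1 on a Raspberry Pi,
# bus 1 on a Beaglebone Black); a bus number can be passed explicitly instead.
dev = I2C.get_i2c_device(0x48)  # 0x48 is an assumed, illustrative address

# Write an 8-bit value to a (hypothetical) configuration register.
dev.write8(0x01, 0x83)

# Read a 16-bit sample from a (hypothetical) data register in big-endian
# order; readU16BE swaps the bytes that read_word_data returns little endian.
raw = dev.readU16BE(0x00)
print("raw reading: 0x%04X" % raw)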
<|file_name|>ItemSharkEgg.java<|end_file_name|><|fim▁begin|>package moCreatures.items; import net.minecraft.src.EntityPlayer; import net.minecraft.src.Item; import net.minecraft.src.ItemStack; import net.minecraft.src.World; import moCreatures.entities.EntitySharkEgg; // Decompiled by Jad v1.5.8g. Copyright 2001 Pavel Kouznetsov. // Jad home page: http://www.kpdus.com/jad.html // Decompiler options: packimports(3) braces deadcode public class ItemSharkEgg extends Item { public ItemSharkEgg(int i) { super(i); maxStackSize = 16; } public ItemStack onItemRightClick(ItemStack itemstack, World world, EntityPlayer entityplayer) { itemstack.stackSize--; if(!world.singleplayerWorld) { EntitySharkEgg entitysharkegg = new EntitySharkEgg(world); entitysharkegg.setPosition(entityplayer.posX, entityplayer.posY, entityplayer.posZ); world.entityJoinedWorld(entitysharkegg); entitysharkegg.motionY += world.rand.nextFloat() * 0.05F; entitysharkegg.motionX += (world.rand.nextFloat() - world.rand.nextFloat()) * 0.3F; entitysharkegg.motionZ += (world.rand.nextFloat() - world.rand.nextFloat()) * 0.3F; } return itemstack; <|fim▁hole|><|fim▁end|>
} }
<|file_name|>constellation.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! The `Constellation`, Servo's Grand Central Station //! //! The constellation tracks all information kept globally by the //! browser engine, which includes: //! //! * The set of all `EventLoop` objects. Each event loop is //! the constellation's view of a script thread. The constellation //! interacts with a script thread by message-passing. //! //! * The set of all `Pipeline` objects. Each pipeline gives the //! constellation's view of a `Window`, with its script thread and //! layout threads. Pipelines may share script threads, but not //! layout threads. //! //! * The set of all `BrowsingContext` objects. Each browsing context //! gives the constellation's view of a `WindowProxy`. //! Each browsing context stores an independent //! session history, created by navigation. The session //! history can be traversed, for example by the back and forwards UI, //! so each session history maintains a list of past and future pipelines, //! as well as the current active pipeline. //! //! There are two kinds of browsing context: top-level ones (for //! example tabs in a browser UI), and nested ones (typically caused //! by `iframe` elements). Browsing contexts have a hierarchy //! (typically caused by `iframe`s containing `iframe`s), giving rise //! to a forest whose roots are top-level browsing context. The logical //! relationship between these types is: //! //! ``` //! +------------+ +------------+ +---------+ //! | Browsing | ------parent?------> | Pipeline | --event_loop--> | Event | //! | Context | ------current------> | | | Loop | //! | | ------prev*--------> | | <---pipeline*-- | | //! | | ------next*--------> | | +---------+ //! | | | | //! | | <-top_level--------- | | //! | | <-browsing_context-- | | //! +------------+ +------------+ //! ``` // //! The constellation also maintains channels to threads, including: //! //! * The script and layout threads. //! * The graphics compositor. //! * The font cache, image cache, and resource manager, which load //! and cache shared fonts, images, or other resources. //! * The service worker manager. //! * The devtools, debugger and webdriver servers. //! //! The constellation passes messages between the threads, and updates its state //! to track the evolving state of the browsing context tree. //! //! The constellation acts as a logger, tracking any `warn!` messages from threads, //! and converting any `error!` or `panic!` into a crash report. //! //! Since there is only one constellation, and its responsibilities include crash reporting, //! it is very important that it does not panic. //! //! It's also important that the constellation not deadlock. In particular, we need //! to be careful that we don't introduce any cycles in the can-block-on relation. //! Blocking is typically introduced by `receiver.recv()`, which blocks waiting for the //! sender to send some data. Servo tries to achieve deadlock-freedom by using the following //! can-block-on relation: //! //! * Layout can block on canvas //! * Layout can block on font cache //! * Layout can block on image cache //! * Constellation can block on compositor //! * Constellation can block on embedder //! * Constellation can block on layout //! * Script can block on anything (other than script) //! 
* Blocking is transitive (if T1 can block on T2 and T2 can block on T3 then T1 can block on T3) //! * Nothing can block on itself! //! //! There is a complexity intoduced by IPC channels, since they do not support //! non-blocking send. This means that as well as `receiver.recv()` blocking, //! `sender.send(data)` can also block when the IPC buffer is full. For this reason it is //! very important that all IPC receivers where we depend on non-blocking send //! use a router to route IPC messages to an mpsc channel. The reason why that solves //! the problem is that under the hood, the router uses a dedicated thread to forward //! messages, and: //! //! * Anything (other than a routing thread) can block on a routing thread //! //! See https://github.com/servo/servo/issues/14704 use crate::browsingcontext::NewBrowsingContextInfo; use crate::browsingcontext::{ AllBrowsingContextsIterator, BrowsingContext, FullyActiveBrowsingContextsIterator, }; use crate::event_loop::EventLoop; use crate::network_listener::NetworkListener; use crate::pipeline::{InitialPipelineState, Pipeline}; use crate::session_history::{ JointSessionHistory, NeedsToReload, SessionHistoryChange, SessionHistoryDiff, }; use crate::timer_scheduler::TimerScheduler; use background_hang_monitor::HangMonitorRegister; use backtrace::Backtrace; use bluetooth_traits::BluetoothRequest; use canvas::canvas_paint_thread::CanvasPaintThread; use canvas_traits::canvas::{CanvasId, CanvasMsg}; use canvas_traits::webgl::WebGLThreads; use compositing::compositor_thread::CompositorProxy; use compositing::compositor_thread::Msg as ToCompositorMsg; use compositing::SendableFrameTree; use crossbeam_channel::{after, never, unbounded, Receiver, Sender}; use devtools_traits::{ChromeToDevtoolsControlMsg, DevtoolsControlMsg}; use embedder_traits::{Cursor, EmbedderMsg, EmbedderProxy, EventLoopWaker}; use euclid::{default::Size2D as UntypedSize2D, Size2D}; use gfx::font_cache_thread::FontCacheThread; use gfx_traits::Epoch; use ipc_channel::ipc::{self, IpcReceiver, IpcSender}; use ipc_channel::router::ROUTER; use ipc_channel::Error as IpcError; use keyboard_types::webdriver::Event as WebDriverInputEvent; use keyboard_types::KeyboardEvent; use layout_traits::LayoutThreadFactory; use log::{Level, LevelFilter, Log, Metadata, Record}; use media::{GLPlayerThreads, WindowGLContext}; use msg::constellation_msg::{BackgroundHangMonitorRegister, HangMonitorAlert, SamplerControlMsg}; use msg::constellation_msg::{ BrowsingContextGroupId, BrowsingContextId, HistoryStateId, PipelineId, TopLevelBrowsingContextId, }; use msg::constellation_msg::{ MessagePortId, MessagePortRouterId, PipelineNamespace, PipelineNamespaceId, PipelineNamespaceRequest, TraversalDirection, }; use net_traits::pub_domains::reg_host; use net_traits::request::RequestBuilder; use net_traits::storage_thread::{StorageThreadMsg, StorageType}; use net_traits::{self, FetchResponseMsg, IpcSend, ResourceThreads}; use profile_traits::mem; use profile_traits::time; use script_traits::CompositorEvent::{MouseButtonEvent, MouseMoveEvent}; use script_traits::MouseEventType; use script_traits::{webdriver_msg, LogEntry, ScriptToConstellationChan, ServiceWorkerMsg}; use script_traits::{ AnimationState, AnimationTickType, AuxiliaryBrowsingContextLoadInfo, CompositorEvent, }; use script_traits::{ ConstellationControlMsg, ConstellationMsg as FromCompositorMsg, DiscardBrowsingContext, }; use script_traits::{DocumentActivity, DocumentState, LayoutControlMsg, LoadData, LoadOrigin}; use 
script_traits::{HistoryEntryReplacement, IFrameSizeMsg, WindowSizeData, WindowSizeType}; use script_traits::{ IFrameLoadInfo, IFrameLoadInfoWithData, IFrameSandboxState, TimerSchedulerMsg, }; use script_traits::{LayoutMsg as FromLayoutMsg, ScriptMsg as FromScriptMsg, ScriptThreadFactory}; use script_traits::{MessagePortMsg, PortMessageTask, StructuredSerializedData}; use script_traits::{SWManagerMsg, ScopeThings, UpdatePipelineIdReason, WebDriverCommandMsg}; use serde::{Deserialize, Serialize}; use servo_config::{opts, pref}; use servo_rand::{random, Rng, ServoRng, SliceRandom}; use servo_remutex::ReentrantMutex; use servo_url::{Host, ImmutableOrigin, ServoUrl}; use std::borrow::ToOwned; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet, VecDeque}; use std::marker::PhantomData; use std::mem::replace; use std::process; use std::rc::{Rc, Weak}; use std::sync::Arc; use std::thread; use style_traits::viewport::ViewportConstraints; use style_traits::CSSPixel; use webvr_traits::{WebVREvent, WebVRMsg}; type PendingApprovalNavigations = HashMap<PipelineId, (LoadData, HistoryEntryReplacement)>; #[derive(Debug)] /// The state used by MessagePortInfo to represent the various states the port can be in. enum TransferState { /// The port is currently managed by a given global, /// identified by its router id. Managed(MessagePortRouterId), /// The port is currently in-transfer, /// and incoming tasks should be buffered until it becomes managed again. TransferInProgress(VecDeque<PortMessageTask>), /// The entangled port has been removed while the port was in-transfer, /// the current port should be removed as well once it is managed again. EntangledRemoved, } #[derive(Debug)] /// Info related to a message-port tracked by the constellation. struct MessagePortInfo { /// The current state of the messageport. state: TransferState, /// The id of the entangled port, if any. entangled_with: Option<MessagePortId>, } /// Servo supports tabs (referred to as browsers), so `Constellation` needs to /// store browser specific data for bookkeeping. struct Browser { /// The currently focused browsing context in this browser for key events. /// The focused pipeline is the current entry of the focused browsing /// context. focused_browsing_context_id: BrowsingContextId, /// The joint session history for this browser. session_history: JointSessionHistory, } /// A browsing context group. /// /// https://html.spec.whatwg.org/multipage/#browsing-context-group #[derive(Clone, Default)] struct BrowsingContextGroup { /// A browsing context group holds a set of top-level browsing contexts. top_level_browsing_context_set: HashSet<TopLevelBrowsingContextId>, /// The set of all event loops in this BrowsingContextGroup. /// We store the event loops in a map /// indexed by registered domain name (as a `Host`) to event loops. /// It is important that scripts with the same eTLD+1, /// who are part of the same browsing-context group /// share an event loop, since they can use `document.domain` /// to become same-origin, at which point they can share DOM objects. event_loops: HashMap<Host, Weak<EventLoop>>, } /// The `Constellation` itself. In the servo browser, there is one /// constellation, which maintains all of the browser global data. /// In embedded applications, there may be more than one constellation, /// which are independent of each other. /// /// The constellation may be in a different process from the pipelines, /// and communicates using IPC. 
/// /// It is parameterized over a `LayoutThreadFactory` and a /// `ScriptThreadFactory` (which in practice are implemented by /// `LayoutThread` in the `layout` crate, and `ScriptThread` in /// the `script` crate). Script and layout communicate using a `Message` /// type. pub struct Constellation<Message, LTF, STF> { /// An ipc-sender/threaded-receiver pair /// to facilitate installing pipeline namespaces in threads /// via a per-process installer. namespace_receiver: Receiver<Result<PipelineNamespaceRequest, IpcError>>, namespace_sender: IpcSender<PipelineNamespaceRequest>, /// An IPC channel for script threads to send messages to the constellation. /// This is the script threads' view of `script_receiver`. script_sender: IpcSender<(PipelineId, FromScriptMsg)>, /// A channel for the constellation to receive messages from script threads. /// This is the constellation's view of `script_sender`. script_receiver: Receiver<Result<(PipelineId, FromScriptMsg), IpcError>>, /// A handle to register components for hang monitoring. /// None when in multiprocess mode. background_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>, /// Channels to control all sampling profilers. sampling_profiler_control: Vec<IpcSender<SamplerControlMsg>>, /// A channel for the background hang monitor to send messages /// to the constellation. background_hang_monitor_sender: IpcSender<HangMonitorAlert>, /// A channel for the constellation to receiver messages /// from the background hang monitor. background_hang_monitor_receiver: Receiver<Result<HangMonitorAlert, IpcError>>, /// An IPC channel for layout threads to send messages to the constellation. /// This is the layout threads' view of `layout_receiver`. layout_sender: IpcSender<FromLayoutMsg>, /// A channel for the constellation to receive messages from layout threads. /// This is the constellation's view of `layout_sender`. layout_receiver: Receiver<Result<FromLayoutMsg, IpcError>>, /// A channel for network listener to send messages to the constellation. network_listener_sender: Sender<(PipelineId, FetchResponseMsg)>, /// A channel for the constellation to receive messages from network listener. network_listener_receiver: Receiver<(PipelineId, FetchResponseMsg)>, /// A channel for the constellation to receive messages from the compositor thread. compositor_receiver: Receiver<FromCompositorMsg>, /// A channel through which messages can be sent to the embedder. embedder_proxy: EmbedderProxy, /// A channel (the implementation of which is port-specific) for the /// constellation to send messages to the compositor thread. compositor_proxy: CompositorProxy, /// The last frame tree sent to WebRender, denoting the browser (tab) user /// has currently selected. This also serves as the key to retrieve data /// about the current active browser from `browsers`. active_browser_id: Option<TopLevelBrowsingContextId>, /// Bookkeeping data for all browsers in constellation. browsers: HashMap<TopLevelBrowsingContextId, Browser>, /// Channels for the constellation to send messages to the public /// resource-related threads. There are two groups of resource threads: one /// for public browsing, and one for private browsing. public_resource_threads: ResourceThreads, /// Channels for the constellation to send messages to the private /// resource-related threads. There are two groups of resource /// threads: one for public browsing, and one for private /// browsing. 
private_resource_threads: ResourceThreads, /// A channel for the constellation to send messages to the font /// cache thread. font_cache_thread: FontCacheThread, /// A channel for the constellation to send messages to the /// debugger thread. debugger_chan: Option<debugger::Sender>, /// A channel for the constellation to send messages to the /// devtools thread. devtools_chan: Option<Sender<DevtoolsControlMsg>>, /// An IPC channel for the constellation to send messages to the /// bluetooth thread. bluetooth_thread: IpcSender<BluetoothRequest>, /// An IPC channel for the constellation to send messages to the /// Service Worker Manager thread. swmanager_chan: Option<IpcSender<ServiceWorkerMsg>>, /// An IPC channel for Service Worker Manager threads to send /// messages to the constellation. This is the SW Manager thread's /// view of `swmanager_receiver`. swmanager_sender: IpcSender<SWManagerMsg>, /// A channel for the constellation to receive messages from the /// Service Worker Manager thread. This is the constellation's view of /// `swmanager_sender`. swmanager_receiver: Receiver<Result<SWManagerMsg, IpcError>>, /// A channel for the constellation to send messages to the /// time profiler thread. time_profiler_chan: time::ProfilerChan, /// A channel for the constellation to send messages to the /// memory profiler thread. mem_profiler_chan: mem::ProfilerChan, /// A channel for a pipeline to schedule timer events. scheduler_chan: IpcSender<TimerSchedulerMsg>, /// The receiver to which the IPC requests from scheduler_chan will be forwarded. scheduler_receiver: Receiver<Result<TimerSchedulerMsg, IpcError>>, /// The logic and data behing scheduling timer events. timer_scheduler: TimerScheduler, /// A single WebRender document the constellation operates on. webrender_document: webrender_api::DocumentId, /// A channel for the constellation to send messages to the /// WebRender thread. webrender_api_sender: webrender_api::RenderApiSender, /// A map of message-port Id to info. message_ports: HashMap<MessagePortId, MessagePortInfo>, /// A map of router-id to ipc-sender, to route messages to ports. message_port_routers: HashMap<MessagePortRouterId, IpcSender<MessagePortMsg>>, /// The set of all the pipelines in the browser. (See the `pipeline` module /// for more details.) pipelines: HashMap<PipelineId, Pipeline>, /// The set of all the browsing contexts in the browser. browsing_contexts: HashMap<BrowsingContextId, BrowsingContext>, /// A user agent holds a a set of browsing context groups. /// /// https://html.spec.whatwg.org/multipage/#browsing-context-group-set browsing_context_group_set: HashMap<BrowsingContextGroupId, BrowsingContextGroup>, /// The Id counter for BrowsingContextGroup. browsing_context_group_next_id: u32, /// When a navigation is performed, we do not immediately update /// the session history, instead we ask the event loop to begin loading /// the new document, and do not update the browsing context until the /// document is active. Between starting the load and it activating, /// we store a `SessionHistoryChange` object for the navigation in progress. pending_changes: Vec<SessionHistoryChange>, /// Pipeline IDs are namespaced in order to avoid name collisions, /// and the namespaces are allocated by the constellation. next_pipeline_namespace_id: PipelineNamespaceId, /// The size of the top-level window. 
window_size: WindowSizeData, /// Bits of state used to interact with the webdriver implementation webdriver: WebDriverData, /// Document states for loaded pipelines (used only when writing screenshots). document_states: HashMap<PipelineId, DocumentState>, /// Are we shutting down? shutting_down: bool, /// Have we seen any warnings? Hopefully always empty! /// The buffer contains `(thread_name, reason)` entries. handled_warnings: VecDeque<(Option<String>, String)>, /// The random number generator and probability for closing pipelines. /// This is for testing the hardening of the constellation. random_pipeline_closure: Option<(ServoRng, f32)>, /// Phantom data that keeps the Rust type system happy. phantom: PhantomData<(Message, LTF, STF)>, /// Entry point to create and get channels to a WebGLThread. webgl_threads: Option<WebGLThreads>, /// A channel through which messages can be sent to the webvr thread. webvr_chan: Option<IpcSender<WebVRMsg>>, /// The XR device registry webxr_registry: webxr_api::Registry, /// A channel through which messages can be sent to the canvas paint thread. canvas_chan: IpcSender<CanvasMsg>, /// Navigation requests from script awaiting approval from the embedder. pending_approval_navigations: PendingApprovalNavigations, /// Bitmask which indicates which combination of mouse buttons are /// currently being pressed. pressed_mouse_buttons: u16, is_running_problem_test: bool, /// If True, exits on thread failure instead of displaying about:failure hard_fail: bool, /// If set with --disable-canvas-aa, disable antialiasing on the HTML /// canvas element. /// Like --disable-text-aa, this is useful for reftests where pixel perfect /// results are required. enable_canvas_antialiasing: bool, /// Entry point to create and get channels to a GLPlayerThread. glplayer_threads: Option<GLPlayerThreads>, /// Application window's GL Context for Media player player_context: WindowGLContext, /// Mechanism to force the compositor to process events. event_loop_waker: Option<Box<dyn EventLoopWaker>>, } /// State needed to construct a constellation. pub struct InitialConstellationState { /// A channel through which messages can be sent to the embedder. pub embedder_proxy: EmbedderProxy, /// A channel through which messages can be sent to the compositor. pub compositor_proxy: CompositorProxy, /// A channel to the debugger, if applicable. pub debugger_chan: Option<debugger::Sender>, /// A channel to the developer tools, if applicable. pub devtools_chan: Option<Sender<DevtoolsControlMsg>>, /// A channel to the bluetooth thread. pub bluetooth_thread: IpcSender<BluetoothRequest>, /// A channel to the font cache thread. pub font_cache_thread: FontCacheThread, /// A channel to the resource thread. pub public_resource_threads: ResourceThreads, /// A channel to the resource thread. pub private_resource_threads: ResourceThreads, /// A channel to the time profiler thread. pub time_profiler_chan: time::ProfilerChan, /// A channel to the memory profiler thread. pub mem_profiler_chan: mem::ProfilerChan, /// Webrender document ID. pub webrender_document: webrender_api::DocumentId, /// Webrender API. pub webrender_api_sender: webrender_api::RenderApiSender, /// Entry point to create and get channels to a WebGLThread. pub webgl_threads: Option<WebGLThreads>, /// A channel to the webgl thread. 
pub webvr_chan: Option<IpcSender<WebVRMsg>>, /// The XR device registry pub webxr_registry: webxr_api::Registry, pub glplayer_threads: Option<GLPlayerThreads>, /// Application window's GL Context for Media player pub player_context: WindowGLContext, /// Mechanism to force the compositor to process events. pub event_loop_waker: Option<Box<dyn EventLoopWaker>>, } /// Data needed for webdriver struct WebDriverData { load_channel: Option<(PipelineId, IpcSender<webdriver_msg::LoadStatus>)>, resize_channel: Option<IpcSender<WindowSizeData>>, } impl WebDriverData { fn new() -> WebDriverData { WebDriverData { load_channel: None, resize_channel: None, } } } /// When we are running reftests, we save an image to compare against a reference. /// This enum gives the possible states of preparing such an image. #[derive(Debug, PartialEq)] enum ReadyToSave { NoTopLevelBrowsingContext, PendingChanges, WebFontNotLoaded, DocumentLoading, EpochMismatch, PipelineUnknown, Ready, } /// When we are exiting a pipeline, we can either force exiting or not. /// A normal exit waits for the compositor to update its state before /// exiting, and delegates layout exit to script. A forced exit does /// not notify the compositor, and exits layout without involving script. #[derive(Clone, Copy)] enum ExitPipelineMode { Normal, Force, } /// The constellation uses logging to perform crash reporting. /// The constellation receives all `warn!`, `error!` and `panic!` messages, /// and generates a crash report when it receives a panic. /// A logger directed at the constellation from content processes #[derive(Clone)] pub struct FromScriptLogger { /// A channel to the constellation pub script_to_constellation_chan: Arc<ReentrantMutex<ScriptToConstellationChan>>, } impl FromScriptLogger { /// Create a new constellation logger. pub fn new(script_to_constellation_chan: ScriptToConstellationChan) -> FromScriptLogger { FromScriptLogger { script_to_constellation_chan: Arc::new(ReentrantMutex::new( script_to_constellation_chan, )), } } /// The maximum log level the constellation logger is interested in. pub fn filter(&self) -> LevelFilter { LevelFilter::Warn } } impl Log for FromScriptLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= Level::Warn } fn log(&self, record: &Record) { if let Some(entry) = log_entry(record) { debug!("Sending log entry {:?}.", entry); let thread_name = thread::current().name().map(ToOwned::to_owned); let msg = FromScriptMsg::LogEntry(thread_name, entry); let chan = self .script_to_constellation_chan .lock() .unwrap_or_else(|err| err.into_inner()); let _ = chan.send(msg); } } fn flush(&self) {} } /// A logger directed at the constellation from the compositor #[derive(Clone)] pub struct FromCompositorLogger { /// A channel to the constellation pub constellation_chan: Arc<ReentrantMutex<Sender<FromCompositorMsg>>>, } impl FromCompositorLogger { /// Create a new constellation logger. pub fn new(constellation_chan: Sender<FromCompositorMsg>) -> FromCompositorLogger { FromCompositorLogger { constellation_chan: Arc::new(ReentrantMutex::new(constellation_chan)), } } /// The maximum log level the constellation logger is interested in. 
pub fn filter(&self) -> LevelFilter { LevelFilter::Warn } } impl Log for FromCompositorLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= Level::Warn } fn log(&self, record: &Record) { if let Some(entry) = log_entry(record) { debug!("Sending log entry {:?}.", entry); let top_level_id = TopLevelBrowsingContextId::installed(); let thread_name = thread::current().name().map(ToOwned::to_owned); let msg = FromCompositorMsg::LogEntry(top_level_id, thread_name, entry); let chan = self .constellation_chan .lock() .unwrap_or_else(|err| err.into_inner()); let _ = chan.send(msg); } } fn flush(&self) {} } /// Rust uses `Record` for storing logging, but servo converts that to /// a `LogEntry`. We do this so that we can record panics as well as log /// messages, and because `Record` does not implement serde (de)serialization, /// so cannot be used over an IPC channel. fn log_entry(record: &Record) -> Option<LogEntry> { match record.level() { Level::Error if thread::panicking() => Some(LogEntry::Panic( format!("{}", record.args()), format!("{:?}", Backtrace::new()), )), Level::Error => Some(LogEntry::Error(format!("{}", record.args()))), Level::Warn => Some(LogEntry::Warn(format!("{}", record.args()))), _ => None, } } /// The number of warnings to include in each crash report. const WARNINGS_BUFFER_SIZE: usize = 32; /// Route an ipc receiver to an mpsc receiver, preserving any errors. /// This is the same as `route_ipc_receiver_to_new_mpsc_receiver`, /// but does not panic on deserializtion errors. fn route_ipc_receiver_to_new_mpsc_receiver_preserving_errors<T>( ipc_receiver: IpcReceiver<T>, ) -> Receiver<Result<T, IpcError>> where T: for<'de> Deserialize<'de> + Serialize + Send + 'static, { let (mpsc_sender, mpsc_receiver) = unbounded(); ROUTER.add_route( ipc_receiver.to_opaque(), Box::new(move |message| drop(mpsc_sender.send(message.to::<T>()))), ); mpsc_receiver } impl<Message, LTF, STF> Constellation<Message, LTF, STF> where LTF: LayoutThreadFactory<Message = Message>, STF: ScriptThreadFactory<Message = Message>, { /// Create a new constellation thread. 
pub fn start( state: InitialConstellationState, initial_window_size: WindowSizeData, random_pipeline_closure_probability: Option<f32>, random_pipeline_closure_seed: Option<usize>, is_running_problem_test: bool, hard_fail: bool, enable_canvas_antialiasing: bool, ) -> (Sender<FromCompositorMsg>, IpcSender<SWManagerMsg>) { let (compositor_sender, compositor_receiver) = unbounded(); // service worker manager to communicate with constellation let (swmanager_sender, swmanager_receiver) = ipc::channel().expect("ipc channel failure"); let sw_mgr_clone = swmanager_sender.clone(); thread::Builder::new() .name("Constellation".to_owned()) .spawn(move || { let (ipc_script_sender, ipc_script_receiver) = ipc::channel().expect("ipc channel failure"); let script_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(ipc_script_receiver); let (namespace_sender, ipc_namespace_receiver) = ipc::channel().expect("ipc channel failure"); let namespace_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors( ipc_namespace_receiver, ); let (scheduler_chan, ipc_scheduler_receiver) = ipc::channel().expect("ipc channel failure"); let scheduler_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors( ipc_scheduler_receiver, ); let (background_hang_monitor_sender, ipc_bhm_receiver) = ipc::channel().expect("ipc channel failure"); let background_hang_monitor_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(ipc_bhm_receiver); // If we are in multiprocess mode, // a dedicated per-process hang monitor will be initialized later inside the content process. // See run_content_process in servo/lib.rs let (background_monitor_register, sampler_chan) = if opts::multiprocess() { (None, vec![]) } else { let (sampling_profiler_control, sampling_profiler_port) = ipc::channel().expect("ipc channel failure"); ( Some(HangMonitorRegister::init( background_hang_monitor_sender.clone(), sampling_profiler_port, )), vec![sampling_profiler_control], ) }; let (ipc_layout_sender, ipc_layout_receiver) = ipc::channel().expect("ipc channel failure"); let layout_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(ipc_layout_receiver); let (network_listener_sender, network_listener_receiver) = unbounded(); let swmanager_receiver = route_ipc_receiver_to_new_mpsc_receiver_preserving_errors(swmanager_receiver); // Zero is reserved for the embedder. 
PipelineNamespace::install(PipelineNamespaceId(1)); let mut constellation: Constellation<Message, LTF, STF> = Constellation { namespace_receiver, namespace_sender, script_sender: ipc_script_sender, background_hang_monitor_sender, background_hang_monitor_receiver, background_monitor_register, sampling_profiler_control: sampler_chan, layout_sender: ipc_layout_sender, script_receiver: script_receiver, compositor_receiver: compositor_receiver, layout_receiver: layout_receiver, network_listener_sender: network_listener_sender, network_listener_receiver: network_listener_receiver, embedder_proxy: state.embedder_proxy, compositor_proxy: state.compositor_proxy, active_browser_id: None, browsers: HashMap::new(), debugger_chan: state.debugger_chan, devtools_chan: state.devtools_chan, bluetooth_thread: state.bluetooth_thread, public_resource_threads: state.public_resource_threads, private_resource_threads: state.private_resource_threads, font_cache_thread: state.font_cache_thread, swmanager_chan: None, swmanager_receiver: swmanager_receiver, swmanager_sender: sw_mgr_clone, browsing_context_group_set: Default::default(), browsing_context_group_next_id: Default::default(), message_ports: HashMap::new(), message_port_routers: HashMap::new(), pipelines: HashMap::new(), browsing_contexts: HashMap::new(), pending_changes: vec![], // We initialize the namespace at 2, since we reserved // namespace 0 for the embedder, and 0 for the constellation next_pipeline_namespace_id: PipelineNamespaceId(2), time_profiler_chan: state.time_profiler_chan, mem_profiler_chan: state.mem_profiler_chan, window_size: initial_window_size, phantom: PhantomData, webdriver: WebDriverData::new(), timer_scheduler: TimerScheduler::new(), scheduler_chan, scheduler_receiver, document_states: HashMap::new(), webrender_document: state.webrender_document, webrender_api_sender: state.webrender_api_sender, shutting_down: false, handled_warnings: VecDeque::new(), random_pipeline_closure: random_pipeline_closure_probability.map(|prob| { let seed = random_pipeline_closure_seed.unwrap_or_else(random); let rng = ServoRng::new_manually_reseeded(seed as u64); warn!("Randomly closing pipelines."); info!("Using seed {} for random pipeline closure.", seed); (rng, prob) }), webgl_threads: state.webgl_threads, webvr_chan: state.webvr_chan, webxr_registry: state.webxr_registry, canvas_chan: CanvasPaintThread::start(), pending_approval_navigations: HashMap::new(), pressed_mouse_buttons: 0, is_running_problem_test, hard_fail, enable_canvas_antialiasing, glplayer_threads: state.glplayer_threads, player_context: state.player_context, event_loop_waker: state.event_loop_waker, }; constellation.run(); }) .expect("Thread spawning failed"); (compositor_sender, swmanager_sender) } /// The main event loop for the constellation. fn run(&mut self) { while !self.shutting_down || !self.pipelines.is_empty() { // Randomly close a pipeline if --random-pipeline-closure-probability is set // This is for testing the hardening of the constellation. self.maybe_close_random_pipeline(); self.handle_request(); } self.handle_shutdown(); } /// Generate a new pipeline id namespace. 
fn next_pipeline_namespace_id(&mut self) -> PipelineNamespaceId { let namespace_id = self.next_pipeline_namespace_id; let PipelineNamespaceId(ref mut i) = self.next_pipeline_namespace_id; *i += 1; namespace_id } fn next_browsing_context_group_id(&mut self) -> BrowsingContextGroupId { let id = self.browsing_context_group_next_id; self.browsing_context_group_next_id += 1; BrowsingContextGroupId(id) } fn get_event_loop( &mut self, host: &Host, top_level_browsing_context_id: &TopLevelBrowsingContextId, opener: &Option<BrowsingContextId>, ) -> Result<Weak<EventLoop>, &'static str> { let bc_group = match opener { Some(browsing_context_id) => { let opener = self .browsing_contexts .get(&browsing_context_id) .ok_or("Opener was closed before the openee started")?; self.browsing_context_group_set .get(&opener.bc_group_id) .ok_or("Opener belongs to an unknow BC group")? }, None => self .browsing_context_group_set .iter() .filter_map(|(_, bc_group)| { if bc_group .top_level_browsing_context_set .contains(&top_level_browsing_context_id) { Some(bc_group) } else { None } }) .last() .ok_or( "Trying to get an event-loop for a top-level belonging to an unknown BC group", )?, }; bc_group .event_loops .get(host) .ok_or("Trying to get an event-loop from an unknown BC group") .map(|event_loop| event_loop.clone()) } fn set_event_loop( &mut self, event_loop: Weak<EventLoop>, host: Host, top_level_browsing_context_id: TopLevelBrowsingContextId, opener: Option<BrowsingContextId>, ) { let relevant_top_level = if let Some(opener) = opener { match self.browsing_contexts.get(&opener) { Some(opener) => opener.top_level_id, None => { warn!("Setting event-loop for an unknown auxiliary"); return; }, } } else { top_level_browsing_context_id }; let maybe_bc_group_id = self .browsing_context_group_set .iter() .filter_map(|(id, bc_group)| { if bc_group .top_level_browsing_context_set .contains(&top_level_browsing_context_id) { Some(id.clone()) } else { None } }) .last(); let bc_group_id = match maybe_bc_group_id { Some(id) => id, None => { warn!("Trying to add an event-loop to an unknown BC group"); return; }, }; if let Some(bc_group) = self.browsing_context_group_set.get_mut(&bc_group_id) { if !bc_group .event_loops .insert(host.clone(), event_loop) .is_none() { warn!( "Double-setting an event-loop for {:?} at {:?}", host, relevant_top_level ); } } } /// Helper function for creating a pipeline fn new_pipeline( &mut self, pipeline_id: PipelineId, browsing_context_id: BrowsingContextId, top_level_browsing_context_id: TopLevelBrowsingContextId, parent_pipeline_id: Option<PipelineId>, opener: Option<BrowsingContextId>, initial_window_size: Size2D<f32, CSSPixel>, // TODO: we have to provide ownership of the LoadData // here, because it will be send on an ipc channel, // and ipc channels take onership of their data. // https://github.com/servo/ipc-channel/issues/138 load_data: LoadData, sandbox: IFrameSandboxState, is_private: bool, is_visible: bool, ) { if self.shutting_down { return; } debug!( "Creating new pipeline {} in browsing context {}.", pipeline_id, browsing_context_id ); let (event_loop, host) = match sandbox { IFrameSandboxState::IFrameSandboxed => (None, None), IFrameSandboxState::IFrameUnsandboxed => { // If this is an about:blank load, it must share the creator's event loop. // This must match the logic in the script thread when determining the proper origin. 
if load_data.url.as_str() != "about:blank" { match reg_host(&load_data.url) { None => (None, None), Some(host) => { match self.get_event_loop( &host, &top_level_browsing_context_id, &opener, ) { Err(err) => { warn!("{}", err); (None, Some(host)) }, Ok(event_loop) => { if let Some(event_loop) = event_loop.upgrade() { (Some(event_loop), None) } else { (None, Some(host)) } }, } }, } } else if let Some(parent) = parent_pipeline_id.and_then(|pipeline_id| self.pipelines.get(&pipeline_id)) { (Some(parent.event_loop.clone()), None) } else if let Some(creator) = load_data .creator_pipeline_id .and_then(|pipeline_id| self.pipelines.get(&pipeline_id)) { (Some(creator.event_loop.clone()), None) } else { (None, None) } }, }; let resource_threads = if is_private { self.private_resource_threads.clone() } else { self.public_resource_threads.clone() }; let result = Pipeline::spawn::<Message, LTF, STF>(InitialPipelineState { id: pipeline_id, browsing_context_id, top_level_browsing_context_id, parent_pipeline_id, opener, script_to_constellation_chan: ScriptToConstellationChan { sender: self.script_sender.clone(), pipeline_id: pipeline_id, }, namespace_request_sender: self.namespace_sender.clone(), pipeline_namespace_id: self.next_pipeline_namespace_id(), background_monitor_register: self.background_monitor_register.clone(), background_hang_monitor_to_constellation_chan: self .background_hang_monitor_sender .clone(), layout_to_constellation_chan: self.layout_sender.clone(), scheduler_chan: self.scheduler_chan.clone(), compositor_proxy: self.compositor_proxy.clone(), devtools_chan: self.devtools_chan.clone(), bluetooth_thread: self.bluetooth_thread.clone(), swmanager_thread: self.swmanager_sender.clone(), font_cache_thread: self.font_cache_thread.clone(), resource_threads, time_profiler_chan: self.time_profiler_chan.clone(), mem_profiler_chan: self.mem_profiler_chan.clone(), window_size: WindowSizeData { initial_viewport: initial_window_size, device_pixel_ratio: self.window_size.device_pixel_ratio, }, event_loop, load_data, prev_visibility: is_visible, webrender_api_sender: self.webrender_api_sender.clone(), webrender_document: self.webrender_document, webgl_chan: self .webgl_threads .as_ref() .map(|threads| threads.pipeline()), webvr_chan: self.webvr_chan.clone(), webxr_registry: self.webxr_registry.clone(), player_context: self.player_context.clone(), event_loop_waker: self.event_loop_waker.as_ref().map(|w| (*w).clone_box()), }); let pipeline = match result { Ok(result) => result, Err(e) => return self.handle_send_error(pipeline_id, e), }; if let Some(sampler_chan) = pipeline.sampler_control_chan { self.sampling_profiler_control.push(sampler_chan); } if let Some(host) = host { debug!( "Adding new host entry {} for top-level browsing context {}.", host, top_level_browsing_context_id ); self.set_event_loop( Rc::downgrade(&pipeline.pipeline.event_loop), host, top_level_browsing_context_id, opener, ); } assert!(!self.pipelines.contains_key(&pipeline_id)); self.pipelines.insert(pipeline_id, pipeline.pipeline); } /// Get an iterator for the fully active browsing contexts in a subtree. fn fully_active_descendant_browsing_contexts_iter( &self, browsing_context_id: BrowsingContextId, ) -> FullyActiveBrowsingContextsIterator { FullyActiveBrowsingContextsIterator { stack: vec![browsing_context_id], pipelines: &self.pipelines, browsing_contexts: &self.browsing_contexts, } } /// Get an iterator for the fully active browsing contexts in a tree. 
fn fully_active_browsing_contexts_iter( &self, top_level_browsing_context_id: TopLevelBrowsingContextId, ) -> FullyActiveBrowsingContextsIterator { self.fully_active_descendant_browsing_contexts_iter(BrowsingContextId::from( top_level_browsing_context_id, )) } /// Get an iterator for the browsing contexts in a subtree. fn all_descendant_browsing_contexts_iter( &self, browsing_context_id: BrowsingContextId, ) -> AllBrowsingContextsIterator { AllBrowsingContextsIterator { stack: vec![browsing_context_id], pipelines: &self.pipelines, browsing_contexts: &self.browsing_contexts, } } /// Create a new browsing context and update the internal bookkeeping. fn new_browsing_context( &mut self, browsing_context_id: BrowsingContextId, top_level_id: TopLevelBrowsingContextId, pipeline_id: PipelineId, parent_pipeline_id: Option<PipelineId>, size: Size2D<f32, CSSPixel>, is_private: bool, is_visible: bool, ) { debug!("Creating new browsing context {}", browsing_context_id); let bc_group_id = match self .browsing_context_group_set .iter_mut() .filter_map(|(id, bc_group)| { if bc_group .top_level_browsing_context_set .contains(&top_level_id) { Some(id) } else { None } }) .last() { Some(id) => id.clone(), None => { warn!( "Top-level was unpexpectedly removed from its top_level_browsing_context_set." ); return; }, }; let browsing_context = BrowsingContext::new( bc_group_id, browsing_context_id, top_level_id, pipeline_id, parent_pipeline_id, size, is_private, is_visible, ); self.browsing_contexts .insert(browsing_context_id, browsing_context); // If this context is a nested container, attach it to parent pipeline. if let Some(parent_pipeline_id) = parent_pipeline_id { if let Some(parent) = self.pipelines.get_mut(&parent_pipeline_id) { parent.add_child(browsing_context_id); } } } fn add_pending_change(&mut self, change: SessionHistoryChange) { debug!( "adding pending session history change with {}", if change.replace.is_some() { "replacement" } else { "no replacement" }, ); self.handle_load_start_msg( change.top_level_browsing_context_id, change.browsing_context_id, ); self.pending_changes.push(change); } /// Handles loading pages, navigation, and granting access to the compositor fn handle_request(&mut self) { #[derive(Debug)] enum Request { PipelineNamespace(PipelineNamespaceRequest), Script((PipelineId, FromScriptMsg)), BackgroundHangMonitor(HangMonitorAlert), Compositor(FromCompositorMsg), Layout(FromLayoutMsg), NetworkListener((PipelineId, FetchResponseMsg)), FromSWManager(SWManagerMsg), Timer(TimerSchedulerMsg), } // A timeout corresponding to the earliest scheduled timer event, if any. let scheduler_timeout = self .timer_scheduler .check_timers() .map(|timeout| after(timeout)) .unwrap_or(never()); // Get one incoming request. // This is one of the few places where the compositor is // allowed to panic. If one of the receiver.recv() calls // fails, it is because the matching sender has been // reclaimed, but this can't happen in normal execution // because the constellation keeps a pointer to the sender, // so it should never be reclaimed. A possible scenario in // which receiver.recv() fails is if some unsafe code // produces undefined behaviour, resulting in the destructor // being called. If this happens, there's not much we can do // other than panic. let request = select! 
{ recv(self.namespace_receiver) -> msg => { msg.expect("Unexpected script channel panic in constellation").map(Request::PipelineNamespace) } recv(self.script_receiver) -> msg => { msg.expect("Unexpected script channel panic in constellation").map(Request::Script) } recv(self.background_hang_monitor_receiver) -> msg => { msg.expect("Unexpected BHM channel panic in constellation").map(Request::BackgroundHangMonitor) } recv(self.compositor_receiver) -> msg => { Ok(Request::Compositor(msg.expect("Unexpected compositor channel panic in constellation"))) } recv(self.layout_receiver) -> msg => { msg.expect("Unexpected layout channel panic in constellation").map(Request::Layout) } recv(self.network_listener_receiver) -> msg => { Ok(Request::NetworkListener( msg.expect("Unexpected network listener channel panic in constellation") )) } recv(self.swmanager_receiver) -> msg => { msg.expect("Unexpected panic channel panic in constellation").map(Request::FromSWManager) } recv(self.scheduler_receiver) -> msg => { msg.expect("Unexpected panic channel panic in constellation").map(Request::Timer) } recv(scheduler_timeout) -> _ => { // Note: by returning, we go back to the top, // where check_timers will be called. return; }, }; let request = match request { Ok(request) => request, Err(err) => return error!("Deserialization failed ({}).", err), }; match request { Request::PipelineNamespace(message) => { self.handle_request_for_pipeline_namespace(message) }, Request::Compositor(message) => self.handle_request_from_compositor(message), Request::Script(message) => { self.handle_request_from_script(message); }, Request::BackgroundHangMonitor(message) => { self.handle_request_from_background_hang_monitor(message); }, Request::Layout(message) => { self.handle_request_from_layout(message); }, Request::NetworkListener(message) => { self.handle_request_from_network_listener(message); }, Request::FromSWManager(message) => { self.handle_request_from_swmanager(message); }, Request::Timer(message) => { self.timer_scheduler.handle_timer_request(message); }, } } fn handle_request_for_pipeline_namespace(&mut self, request: PipelineNamespaceRequest) { let PipelineNamespaceRequest(sender) = request; let _ = sender.send(self.next_pipeline_namespace_id()); } fn handle_request_from_background_hang_monitor(&self, message: HangMonitorAlert) { match message { HangMonitorAlert::Profile(bytes) => self .embedder_proxy .send((None, EmbedderMsg::ReportProfile(bytes))), HangMonitorAlert::Hang(hang) => { // TODO: In case of a permanent hang being reported, add a "kill script" workflow, // via the embedder? warn!("Component hang alert: {:?}", hang); }, } } fn handle_request_from_network_listener(&mut self, message: (PipelineId, FetchResponseMsg)) { let (id, message_) = message; let result = match self.pipelines.get(&id) { Some(pipeline) => { let msg = ConstellationControlMsg::NavigationResponse(id, message_); pipeline.event_loop.send(msg) }, None => { return warn!("Pipeline {:?} got fetch data after closure!", id); }, }; if let Err(e) = result { self.handle_send_error(id, e); } } fn handle_request_from_swmanager(&mut self, message: SWManagerMsg) { match message { SWManagerMsg::OwnSender(sw_sender) => { // store service worker manager for communicating with it. 
self.swmanager_chan = Some(sw_sender); }, } } fn handle_request_from_compositor(&mut self, message: FromCompositorMsg) { debug!("constellation got {:?} message", message); match message { FromCompositorMsg::Exit => { self.handle_exit(); }, FromCompositorMsg::GetBrowsingContext(pipeline_id, resp_chan) => { self.handle_get_browsing_context(pipeline_id, resp_chan); }, FromCompositorMsg::GetPipeline(browsing_context_id, resp_chan) => { self.handle_get_pipeline(browsing_context_id, resp_chan); }, FromCompositorMsg::GetFocusTopLevelBrowsingContext(resp_chan) => { // The focused browsing context's top-level browsing context is // the active browser's id itself. let _ = resp_chan.send(self.active_browser_id); }, FromCompositorMsg::Keyboard(key_event) => { self.handle_key_msg(key_event); }, // Perform a navigation previously requested by script, if approved by the embedder. // If there is already a pending page (self.pending_changes), it will not be overridden; // However, if the id is not encompassed by another change, it will be. FromCompositorMsg::AllowNavigationResponse(pipeline_id, allowed) => { let pending = self.pending_approval_navigations.remove(&pipeline_id); let top_level_browsing_context_id = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.top_level_browsing_context_id, None => return warn!("Attempted to navigate {} after closure.", pipeline_id), }; match pending { Some((load_data, replace)) => { if allowed { self.load_url( top_level_browsing_context_id, pipeline_id, load_data, replace, ); } else { let pipeline_is_top_level_pipeline = self .browsing_contexts .get(&BrowsingContextId::from(top_level_browsing_context_id)) .map(|ctx| ctx.pipeline_id == pipeline_id) .unwrap_or(false); // If the navigation is refused, and this concerns an iframe, // we need to take it out of it's "delaying-load-events-mode". // https://html.spec.whatwg.org/multipage/#delaying-load-events-mode if !pipeline_is_top_level_pipeline { let msg = ConstellationControlMsg::StopDelayingLoadEventsMode( pipeline_id, ); let result = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.send(msg), None => { return warn!( "Attempted to navigate {} after closure.", pipeline_id ); }, }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } } }, None => { return warn!( "AllowNavigationReqsponse for unknow request: {:?}", pipeline_id ); }, }; }, // Load a new page from a typed url // If there is already a pending page (self.pending_changes), it will not be overridden; // However, if the id is not encompassed by another change, it will be. FromCompositorMsg::LoadUrl(top_level_browsing_context_id, url) => { let load_data = LoadData::new(LoadOrigin::Constellation, url, None, None, None); let ctx_id = BrowsingContextId::from(top_level_browsing_context_id); let pipeline_id = match self.browsing_contexts.get(&ctx_id) { Some(ctx) => ctx.pipeline_id, None => { return warn!( "LoadUrl for unknow browsing context: {:?}", top_level_browsing_context_id ); }, }; // Since this is a top-level load, initiated by the embedder, go straight to load_url, // bypassing schedule_navigation. 
self.load_url( top_level_browsing_context_id, pipeline_id, load_data, HistoryEntryReplacement::Disabled, ); }, FromCompositorMsg::IsReadyToSaveImage(pipeline_states) => { let is_ready = self.handle_is_ready_to_save_image(pipeline_states); debug!("Ready to save image {:?}.", is_ready); if self.is_running_problem_test { println!("got ready to save image query, result is {:?}", is_ready); } let is_ready = is_ready == ReadyToSave::Ready; self.compositor_proxy .send(ToCompositorMsg::IsReadyToSaveImageReply(is_ready)); if self.is_running_problem_test { println!("sent response"); } }, // Create a new top level browsing context. Will use response_chan to return // the browsing context id. FromCompositorMsg::NewBrowser(url, top_level_browsing_context_id) => { self.handle_new_top_level_browsing_context(url, top_level_browsing_context_id); }, // Close a top level browsing context. FromCompositorMsg::CloseBrowser(top_level_browsing_context_id) => { self.handle_close_top_level_browsing_context(top_level_browsing_context_id); }, // Panic a top level browsing context. FromCompositorMsg::SendError(top_level_browsing_context_id, error) => { debug!("constellation got SendError message"); if let Some(id) = top_level_browsing_context_id { self.handle_panic(id, error, None); } else { warn!("constellation got a SendError message without top level id"); } }, // Send frame tree to WebRender. Make it visible. FromCompositorMsg::SelectBrowser(top_level_browsing_context_id) => { self.send_frame_tree(top_level_browsing_context_id); }, // Handle a forward or back request FromCompositorMsg::TraverseHistory(top_level_browsing_context_id, direction) => { self.handle_traverse_history_msg(top_level_browsing_context_id, direction); }, FromCompositorMsg::WindowSize(top_level_browsing_context_id, new_size, size_type) => { self.handle_window_size_msg(top_level_browsing_context_id, new_size, size_type); }, FromCompositorMsg::TickAnimation(pipeline_id, tick_type) => { self.handle_tick_animation(pipeline_id, tick_type) }, FromCompositorMsg::WebDriverCommand(command) => { self.handle_webdriver_msg(command); }, FromCompositorMsg::Reload(top_level_browsing_context_id) => { self.handle_reload_msg(top_level_browsing_context_id); }, FromCompositorMsg::LogEntry(top_level_browsing_context_id, thread_name, entry) => { self.handle_log_entry(top_level_browsing_context_id, thread_name, entry); }, FromCompositorMsg::WebVREvents(pipeline_ids, events) => { self.handle_webvr_events(pipeline_ids, events); }, FromCompositorMsg::ForwardEvent(destination_pipeline_id, event) => { self.forward_event(destination_pipeline_id, event); }, FromCompositorMsg::SetCursor(cursor) => self.handle_set_cursor_msg(cursor), FromCompositorMsg::EnableProfiler(rate, max_duration) => { for chan in &self.sampling_profiler_control { if let Err(e) = chan.send(SamplerControlMsg::Enable(rate, max_duration)) { warn!("error communicating with sampling profiler: {}", e); } } }, FromCompositorMsg::DisableProfiler => { for chan in &self.sampling_profiler_control { if let Err(e) = chan.send(SamplerControlMsg::Disable) { warn!("error communicating with sampling profiler: {}", e); } } }, FromCompositorMsg::ExitFullScreen(top_level_browsing_context_id) => { self.handle_exit_fullscreen_msg(top_level_browsing_context_id); }, } } fn handle_request_from_script(&mut self, message: (PipelineId, FromScriptMsg)) { let (source_pipeline_id, content) = message; debug!( "constellation got {:?} message from pipeline {}", content, source_pipeline_id ); let source_top_ctx_id = match self .pipelines 
.get(&source_pipeline_id) .map(|pipeline| pipeline.top_level_browsing_context_id) { None => return warn!("ScriptMsg from closed pipeline {:?}.", source_pipeline_id), Some(ctx) => ctx, }; match content { FromScriptMsg::RerouteMessagePort(port_id, task) => { self.handle_reroute_messageport(port_id, task); }, FromScriptMsg::MessagePortShipped(port_id) => { self.handle_messageport_shipped(port_id); }, FromScriptMsg::NewMessagePortRouter(router_id, ipc_sender) => { self.handle_new_messageport_router(router_id, ipc_sender); }, FromScriptMsg::RemoveMessagePortRouter(router_id) => { self.handle_remove_messageport_router(router_id); }, FromScriptMsg::NewMessagePort(router_id, port_id) => { self.handle_new_messageport(router_id, port_id); }, FromScriptMsg::RemoveMessagePort(port_id) => { self.handle_remove_messageport(port_id); }, FromScriptMsg::EntanglePorts(port1, port2) => { self.handle_entangle_messageports(port1, port2); }, FromScriptMsg::ForwardToEmbedder(embedder_msg) => { self.embedder_proxy .send((Some(source_top_ctx_id), embedder_msg)); }, FromScriptMsg::PipelineExited => { self.handle_pipeline_exited(source_pipeline_id); }, FromScriptMsg::DiscardDocument => { self.handle_discard_document(source_top_ctx_id, source_pipeline_id); }, FromScriptMsg::DiscardTopLevelBrowsingContext => { self.handle_close_top_level_browsing_context(source_top_ctx_id); }, FromScriptMsg::InitiateNavigateRequest(req_init, cancel_chan) => { self.handle_navigate_request(source_pipeline_id, req_init, cancel_chan); }, FromScriptMsg::ScriptLoadedURLInIFrame(load_info) => { self.handle_script_loaded_url_in_iframe_msg(load_info); }, FromScriptMsg::ScriptNewIFrame(load_info, layout_sender) => { self.handle_script_new_iframe(load_info, layout_sender); }, FromScriptMsg::ScriptNewAuxiliary(load_info, layout_sender) => { self.handle_script_new_auxiliary(load_info, layout_sender); }, FromScriptMsg::ChangeRunningAnimationsState(animation_state) => { self.handle_change_running_animations_state(source_pipeline_id, animation_state) }, // Ask the embedder for permission to load a new page. FromScriptMsg::LoadUrl(load_data, replace) => { self.schedule_navigation(source_top_ctx_id, source_pipeline_id, load_data, replace); }, FromScriptMsg::AbortLoadUrl => { self.handle_abort_load_url_msg(source_pipeline_id); }, // A page loaded has completed all parsing, script, and reflow messages have been sent. FromScriptMsg::LoadComplete => { self.handle_load_complete_msg(source_top_ctx_id, source_pipeline_id) }, // Handle navigating to a fragment FromScriptMsg::NavigatedToFragment(new_url, replacement_enabled) => { self.handle_navigated_to_fragment(source_pipeline_id, new_url, replacement_enabled); }, // Handle a forward or back request FromScriptMsg::TraverseHistory(direction) => { self.handle_traverse_history_msg(source_top_ctx_id, direction); }, // Handle a push history state request. FromScriptMsg::PushHistoryState(history_state_id, url) => { self.handle_push_history_state_msg(source_pipeline_id, history_state_id, url); }, FromScriptMsg::ReplaceHistoryState(history_state_id, url) => { self.handle_replace_history_state_msg(source_pipeline_id, history_state_id, url); }, // Handle a joint session history length request. 
FromScriptMsg::JointSessionHistoryLength(sender) => { self.handle_joint_session_history_length(source_top_ctx_id, sender); }, // Notification that the new document is ready to become active FromScriptMsg::ActivateDocument => { self.handle_activate_document_msg(source_pipeline_id); }, // Update pipeline url after redirections FromScriptMsg::SetFinalUrl(final_url) => { // The script may have finished loading after we already started shutting down. if let Some(ref mut pipeline) = self.pipelines.get_mut(&source_pipeline_id) { pipeline.url = final_url; } else { warn!("constellation got set final url message for dead pipeline"); } }, FromScriptMsg::PostMessage { target: browsing_context_id, source: source_pipeline_id, target_origin: origin, source_origin, data, } => { self.handle_post_message_msg( browsing_context_id, source_pipeline_id, origin, source_origin, data, ); }, FromScriptMsg::Focus => { self.handle_focus_msg(source_pipeline_id); }, FromScriptMsg::VisibilityChangeComplete(is_visible) => { self.handle_visibility_change_complete(source_pipeline_id, is_visible); }, FromScriptMsg::RemoveIFrame(browsing_context_id, sender) => { let removed_pipeline_ids = self.handle_remove_iframe_msg(browsing_context_id); if let Err(e) = sender.send(removed_pipeline_ids) { warn!("Error replying to remove iframe ({})", e); } }, FromScriptMsg::CreateCanvasPaintThread(size, sender) => { self.handle_create_canvas_paint_thread_msg(size, sender) }, FromScriptMsg::SetDocumentState(state) => { self.document_states.insert(source_pipeline_id, state); }, FromScriptMsg::GetClientWindow(send) => { self.compositor_proxy .send(ToCompositorMsg::GetClientWindow(send)); }, FromScriptMsg::GetScreenSize(send) => { self.compositor_proxy .send(ToCompositorMsg::GetScreenSize(send)); }, FromScriptMsg::GetScreenAvailSize(send) => { self.compositor_proxy .send(ToCompositorMsg::GetScreenAvailSize(send)); }, FromScriptMsg::LogEntry(thread_name, entry) => { self.handle_log_entry(Some(source_top_ctx_id), thread_name, entry); }, FromScriptMsg::TouchEventProcessed(result) => self .compositor_proxy .send(ToCompositorMsg::TouchEventProcessed(result)), FromScriptMsg::GetBrowsingContextInfo(pipeline_id, sender) => { let result = self .pipelines .get(&pipeline_id) .and_then(|pipeline| self.browsing_contexts.get(&pipeline.browsing_context_id)) .map(|ctx| (ctx.id, ctx.parent_pipeline_id)); if let Err(e) = sender.send(result) { warn!( "Sending reply to get browsing context info failed ({:?}).", e ); } }, FromScriptMsg::GetTopForBrowsingContext(browsing_context_id, sender) => { let result = self .browsing_contexts .get(&browsing_context_id) .and_then(|bc| Some(bc.top_level_id)); if let Err(e) = sender.send(result) { warn!( "Sending reply to get top for browsing context info failed ({:?}).", e ); } }, FromScriptMsg::GetChildBrowsingContextId(browsing_context_id, index, sender) => { let result = self .browsing_contexts .get(&browsing_context_id) .and_then(|bc| self.pipelines.get(&bc.pipeline_id)) .and_then(|pipeline| pipeline.children.get(index)) .map(|maybe_bcid| *maybe_bcid); if let Err(e) = sender.send(result) { warn!( "Sending reply to get child browsing context ID failed ({:?}).", e ); } }, FromScriptMsg::RegisterServiceWorker(scope_things, scope) => { self.handle_register_serviceworker(scope_things, scope); }, FromScriptMsg::ForwardDOMMessage(msg_vec, scope_url) => { if let Some(ref mgr) = self.swmanager_chan { let _ = mgr.send(ServiceWorkerMsg::ForwardDOMMessage(msg_vec, scope_url)); } else { warn!("Unable to forward DOMMessage for 
postMessage call"); } }, FromScriptMsg::BroadcastStorageEvent(storage, url, key, old_value, new_value) => { self.handle_broadcast_storage_event( source_pipeline_id, storage, url, key, old_value, new_value, ); }, } } fn handle_request_from_layout(&mut self, message: FromLayoutMsg) { debug!("Constellation got {:?} message", message); match message { FromLayoutMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => { self.handle_change_running_animations_state(pipeline_id, animation_state) }, // Layout sends new sizes for all subframes. This needs to be reflected by all // frame trees in the navigation context containing the subframe. FromLayoutMsg::IFrameSizes(iframe_sizes) => { self.handle_iframe_size_msg(iframe_sizes); }, FromLayoutMsg::PendingPaintMetric(pipeline_id, epoch) => { self.handle_pending_paint_metric(pipeline_id, epoch); }, FromLayoutMsg::ViewportConstrained(pipeline_id, constraints) => { self.handle_viewport_constrained_msg(pipeline_id, constraints); }, } } fn handle_reroute_messageport(&mut self, port_id: MessagePortId, task: PortMessageTask) { let info = match self.message_ports.get_mut(&port_id) { Some(info) => info, None => { return warn!( "Constellation asked to re-route msg to unknown messageport {:?}", port_id ) }, }; match &mut info.state { TransferState::Managed(router_id) => { if let Some(sender) = self.message_port_routers.get(&router_id) { let _ = sender.send(MessagePortMsg::NewTask(port_id, task)); } else { warn!("No message-port sender for {:?}", router_id); } }, TransferState::TransferInProgress(queue) => queue.push_back(task), TransferState::EntangledRemoved => warn!( "Messageport received a message, but entangled has alread been removed {:?}", port_id ), } } fn handle_messageport_shipped(&mut self, port_id: MessagePortId) { if let Some(info) = self.message_ports.get_mut(&port_id) { if let TransferState::Managed(_) = info.state { info.state = TransferState::TransferInProgress(VecDeque::new()); } } else { warn!( "Constellation asked to mark unknown messageport as shipped {:?}", port_id ); } } fn handle_new_messageport_router( &mut self, router_id: MessagePortRouterId, control_sender: IpcSender<MessagePortMsg>, ) { self.message_port_routers.insert(router_id, control_sender); } fn handle_remove_messageport_router(&mut self, router_id: MessagePortRouterId) { self.message_port_routers.remove(&router_id); } fn handle_new_messageport(&mut self, router_id: MessagePortRouterId, port_id: MessagePortId) { match self.message_ports.entry(port_id) { // If we know about this port, it means it was transferred. Entry::Occupied(mut entry) => { if let TransferState::EntangledRemoved = entry.get().state { // If the entangled port has been removed while this one was in-transfer, // remove it now. if let Some(sender) = self.message_port_routers.get(&router_id) { let _ = sender.send(MessagePortMsg::RemoveMessagePort(port_id)); } else { warn!("No message-port sender for {:?}", router_id); } entry.remove_entry(); return; } let new_info = MessagePortInfo { state: TransferState::Managed(router_id), entangled_with: entry.get().entangled_with.clone(), }; let old_info = entry.insert(new_info); let buffer = match old_info.state { TransferState::TransferInProgress(buffer) => buffer, _ => { return warn!("Completing transfer of a port that did not have a transfer in progress."); }, }; // Forward the buffered message-queue. 
if let Some(sender) = self.message_port_routers.get(&router_id) { let _ = sender.send(MessagePortMsg::CompleteTransfer(port_id.clone(), buffer)); } else { warn!("No message-port sender for {:?}", router_id); } }, Entry::Vacant(entry) => { let info = MessagePortInfo { state: TransferState::Managed(router_id), entangled_with: None, }; entry.insert(info); }, } } fn handle_remove_messageport(&mut self, port_id: MessagePortId) { let entangled = match self.message_ports.remove(&port_id) { Some(info) => info.entangled_with, None => { return warn!( "Constellation asked to remove unknown messageport {:?}", port_id ); }, }; let entangled_id = match entangled { Some(id) => id, None => return, }; let info = match self.message_ports.get_mut(&entangled_id) { Some(info) => info, None => { return warn!( "Constellation asked to remove unknown entangled messageport {:?}", entangled_id ) }, }; let router_id = match info.state { TransferState::EntangledRemoved => return warn!( "Constellation asked to remove entangled messageport by a port that was already removed {:?}", port_id ), TransferState::TransferInProgress(_) => { // Note: since the port is in-transfer, we don't have a router to send it a message // to let it know that its entangled port has been removed. // Hence we mark it so that it will be messaged and removed once the transfer completes. info.state = TransferState::EntangledRemoved; return; }, TransferState::Managed(router_id) => router_id, }; if let Some(sender) = self.message_port_routers.get(&router_id) { let _ = sender.send(MessagePortMsg::RemoveMessagePort(entangled_id)); } else { warn!("No message-port sender for {:?}", router_id); } } fn handle_entangle_messageports(&mut self, port1: MessagePortId, port2: MessagePortId) { if let Some(info) = self.message_ports.get_mut(&port1) { info.entangled_with = Some(port2); } else { warn!( "Constellation asked to entangle unknown messageport: {:?}", port1 ); } if let Some(info) = self.message_ports.get_mut(&port2) { info.entangled_with = Some(port1); } else { warn!( "Constellation asked to entangle unknown messageport: {:?}", port2 ); } } fn handle_register_serviceworker(&self, scope_things: ScopeThings, scope: ServoUrl) { if let Some(ref mgr) = self.swmanager_chan { let _ = mgr.send(ServiceWorkerMsg::RegisterServiceWorker(scope_things, scope)); } else { warn!("sending scope info to service worker manager failed"); } } fn handle_broadcast_storage_event( &self, pipeline_id: PipelineId, storage: StorageType, url: ServoUrl, key: Option<String>, old_value: Option<String>, new_value: Option<String>, ) { let origin = url.origin(); for pipeline in self.pipelines.values() { if (pipeline.id != pipeline_id) && (pipeline.url.origin() == origin) { let msg = ConstellationControlMsg::DispatchStorageEvent( pipeline.id, storage, url.clone(), key.clone(), old_value.clone(), new_value.clone(), ); if let Err(err) = pipeline.event_loop.send(msg) { warn!( "Failed to broadcast storage event to pipeline {} ({:?}).", pipeline.id, err ); } } } } fn handle_exit(&mut self) { // TODO: add a timer, which forces shutdown if threads aren't responsive.
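// Note that handle_exit only tears down browsing contexts and pipelines; the blocking, thread-by-thread shutdown happens later in handle_shutdown, once no pipelines remain.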
if self.shutting_down { return; } self.shutting_down = true; self.mem_profiler_chan.send(mem::ProfilerMsg::Exit); // Close the top-level browsing contexts let browsing_context_ids: Vec<BrowsingContextId> = self .browsing_contexts .values() .filter(|browsing_context| browsing_context.is_top_level()) .map(|browsing_context| browsing_context.id) .collect(); for browsing_context_id in browsing_context_ids { debug!( "Removing top-level browsing context {}.", browsing_context_id ); self.close_browsing_context(browsing_context_id, ExitPipelineMode::Normal); } // Close any pending changes and pipelines while let Some(pending) = self.pending_changes.pop() { debug!( "Removing pending browsing context {}.", pending.browsing_context_id ); self.close_browsing_context(pending.browsing_context_id, ExitPipelineMode::Normal); debug!("Removing pending pipeline {}.", pending.new_pipeline_id); self.close_pipeline( pending.new_pipeline_id, DiscardBrowsingContext::Yes, ExitPipelineMode::Normal, ); } // In case there are browsing contexts which weren't attached, we close them. let browsing_context_ids: Vec<BrowsingContextId> = self.browsing_contexts.keys().cloned().collect(); for browsing_context_id in browsing_context_ids { debug!( "Removing detached browsing context {}.", browsing_context_id ); self.close_browsing_context(browsing_context_id, ExitPipelineMode::Normal); } // In case there are pipelines which weren't attached to the pipeline tree, we close them. let pipeline_ids: Vec<PipelineId> = self.pipelines.keys().cloned().collect(); for pipeline_id in pipeline_ids { debug!("Removing detached pipeline {}.", pipeline_id); self.close_pipeline( pipeline_id, DiscardBrowsingContext::Yes, ExitPipelineMode::Normal, ); } } fn handle_shutdown(&mut self) { // At this point, there are no active pipelines, // so we can safely block on other threads, without worrying about deadlock. // Channels to receive signals when threads are done exiting. 
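// The core resource and storage threads are each handed an IPC sender to signal on; the constellation blocks on the matching receivers below before telling the compositor that shutdown is complete.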
let (core_sender, core_receiver) = ipc::channel().expect("Failed to create IPC channel!"); let (storage_sender, storage_receiver) = ipc::channel().expect("Failed to create IPC channel!"); debug!("Exiting core resource threads."); if let Err(e) = self .public_resource_threads .send(net_traits::CoreResourceMsg::Exit(core_sender)) { warn!("Exit resource thread failed ({})", e); } if let Some(ref chan) = self.debugger_chan { debugger::shutdown_server(chan); } if let Some(ref chan) = self.devtools_chan { debug!("Exiting devtools."); let msg = DevtoolsControlMsg::FromChrome(ChromeToDevtoolsControlMsg::ServerExitMsg); if let Err(e) = chan.send(msg) { warn!("Exit devtools failed ({:?})", e); } } debug!("Exiting storage resource threads."); if let Err(e) = self .public_resource_threads .send(StorageThreadMsg::Exit(storage_sender)) { warn!("Exit storage thread failed ({})", e); } debug!("Exiting bluetooth thread."); if let Err(e) = self.bluetooth_thread.send(BluetoothRequest::Exit) { warn!("Exit bluetooth thread failed ({})", e); } debug!("Exiting service worker manager thread."); if let Some(mgr) = self.swmanager_chan.as_ref() { if let Err(e) = mgr.send(ServiceWorkerMsg::Exit) { warn!("Exit service worker manager failed ({})", e); } } debug!("Exiting Canvas Paint thread."); if let Err(e) = self.canvas_chan.send(CanvasMsg::Exit) { warn!("Exit Canvas Paint thread failed ({})", e); } if let Some(webgl_threads) = self.webgl_threads.as_ref() { debug!("Exiting WebGL thread."); if let Err(e) = webgl_threads.exit() { warn!("Exit WebGL Thread failed ({})", e); } } if let Some(chan) = self.webvr_chan.as_ref() { debug!("Exiting WebVR thread."); if let Err(e) = chan.send(WebVRMsg::Exit) { warn!("Exit WebVR thread failed ({})", e); } } debug!("Exiting GLPlayer thread."); if let Some(glplayer_threads) = self.glplayer_threads.as_ref() { if let Err(e) = glplayer_threads.exit() { warn!("Exit GLPlayer Thread failed ({})", e); } } debug!("Exiting font cache thread."); self.font_cache_thread.exit(); // Receive exit signals from threads. if let Err(e) = core_receiver.recv() { warn!("Exit resource thread failed ({})", e); } if let Err(e) = storage_receiver.recv() { warn!("Exit storage thread failed ({})", e); } debug!("Asking compositor to complete shutdown."); self.compositor_proxy .send(ToCompositorMsg::ShutdownComplete); } fn handle_pipeline_exited(&mut self, pipeline_id: PipelineId) { debug!("Pipeline {:?} exited.", pipeline_id); self.pipelines.remove(&pipeline_id); } fn handle_send_error(&mut self, pipeline_id: PipelineId, err: IpcError) { // Treat send error the same as receiving a panic message error!("Pipeline {} send error ({}).", pipeline_id, err); let top_level_browsing_context_id = self .pipelines .get(&pipeline_id) .map(|pipeline| pipeline.top_level_browsing_context_id); if let Some(top_level_browsing_context_id) = top_level_browsing_context_id { let reason = format!("Send failed ({})", err); self.handle_panic(top_level_browsing_context_id, reason, None); } } fn handle_panic( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, reason: String, backtrace: Option<String>, ) { if self.hard_fail { // It's quite difficult to make Servo exit cleanly if some threads have failed. // Hard fail exists for test runners so we crash and that's good enough. println!("Pipeline failed in hard-fail mode. 
Crashing!"); process::exit(1); } debug!( "Panic handler for top-level browsing context {}: {}.", top_level_browsing_context_id, reason ); let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); self.embedder_proxy.send(( Some(top_level_browsing_context_id), EmbedderMsg::Panic(reason, backtrace), )); let browsing_context = match self.browsing_contexts.get(&browsing_context_id) { Some(context) => context, None => return warn!("failed browsing context is missing"), }; let window_size = browsing_context.size; let pipeline_id = browsing_context.pipeline_id; let is_visible = browsing_context.is_visible; let pipeline = match self.pipelines.get(&pipeline_id) { Some(p) => p, None => return warn!("failed pipeline is missing"), }; let pipeline_url = pipeline.url.clone(); let opener = pipeline.opener; self.close_browsing_context_children( browsing_context_id, DiscardBrowsingContext::No, ExitPipelineMode::Force, ); let failure_url = ServoUrl::parse("about:failure").expect("infallible"); if pipeline_url == failure_url { return error!("about:failure failed"); } warn!("creating replacement pipeline for about:failure"); let new_pipeline_id = PipelineId::new(); let load_data = LoadData::new(LoadOrigin::Constellation, failure_url, None, None, None); let sandbox = IFrameSandboxState::IFrameSandboxed; let is_private = false; self.new_pipeline( new_pipeline_id, browsing_context_id, top_level_browsing_context_id, None, opener, window_size, load_data, sandbox, is_private, is_visible, ); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_browsing_context_id, browsing_context_id: browsing_context_id, new_pipeline_id: new_pipeline_id, replace: None, new_browsing_context_info: None, window_size, }); } fn handle_log_entry( &mut self, top_level_browsing_context_id: Option<TopLevelBrowsingContextId>, thread_name: Option<String>, entry: LogEntry, ) { debug!("Received log entry {:?}.", entry); match (entry, top_level_browsing_context_id) { (LogEntry::Panic(reason, backtrace), Some(top_level_browsing_context_id)) => { self.handle_panic(top_level_browsing_context_id, reason, Some(backtrace)); }, (LogEntry::Panic(reason, _), _) | (LogEntry::Error(reason), _) | (LogEntry::Warn(reason), _) => { // VecDeque::truncate is unstable if WARNINGS_BUFFER_SIZE <= self.handled_warnings.len() { self.handled_warnings.pop_front(); } self.handled_warnings.push_back((thread_name, reason)); }, } } fn handle_webvr_events(&mut self, ids: Vec<PipelineId>, events: Vec<WebVREvent>) { for id in ids { match self.pipelines.get_mut(&id) { Some(ref pipeline) => { // Notify script thread let _ = pipeline .event_loop .send(ConstellationControlMsg::WebVREvents(id, events.clone())); }, None => warn!("constellation got webvr event for dead pipeline"), } } } fn forward_event(&mut self, destination_pipeline_id: PipelineId, event: CompositorEvent) { if let MouseButtonEvent(event_type, button, ..) = &event { match event_type { MouseEventType::MouseDown | MouseEventType::Click => { self.pressed_mouse_buttons |= *button as u16; }, MouseEventType::MouseUp => { self.pressed_mouse_buttons &= !(*button as u16); }, } } let event = match event { MouseButtonEvent(event_type, button, point, node_address, point_in_node, _) => { MouseButtonEvent( event_type, button, point, node_address, point_in_node, self.pressed_mouse_buttons, ) }, MouseMoveEvent(point, node_address, _) => { MouseMoveEvent(point, node_address, self.pressed_mouse_buttons) }, _ => event, }; if let MouseButtonEvent(MouseEventType::Click, ..) 
= event { self.pressed_mouse_buttons = 0; } let msg = ConstellationControlMsg::SendEvent(destination_pipeline_id, event); let result = match self.pipelines.get(&destination_pipeline_id) { None => { debug!( "Pipeline {:?} got event after closure.", destination_pipeline_id ); return; }, Some(pipeline) => pipeline.event_loop.send(msg), }; if let Err(e) = result { self.handle_send_error(destination_pipeline_id, e); } } fn handle_new_top_level_browsing_context( &mut self, url: ServoUrl, top_level_browsing_context_id: TopLevelBrowsingContextId, ) { let window_size = self.window_size.initial_viewport; let pipeline_id = PipelineId::new(); let msg = ( Some(top_level_browsing_context_id), EmbedderMsg::BrowserCreated(top_level_browsing_context_id), ); self.embedder_proxy.send(msg); let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); let load_data = LoadData::new(LoadOrigin::Constellation, url, None, None, None); let sandbox = IFrameSandboxState::IFrameUnsandboxed; let is_private = false; let is_visible = true; // Register this new top-level browsing context id as a browser and set // its focused browsing context to be itself. self.browsers.insert( top_level_browsing_context_id, Browser { focused_browsing_context_id: browsing_context_id, session_history: JointSessionHistory::new(), }, ); // https://html.spec.whatwg.org/multipage/#creating-a-new-browsing-context-group let mut new_bc_group: BrowsingContextGroup = Default::default(); let new_bc_group_id = self.next_browsing_context_group_id(); new_bc_group .top_level_browsing_context_set .insert(top_level_browsing_context_id.clone()); self.browsing_context_group_set .insert(new_bc_group_id, new_bc_group); self.new_pipeline( pipeline_id, browsing_context_id, top_level_browsing_context_id, None, None, window_size, load_data, sandbox, is_private, is_visible, ); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_browsing_context_id, browsing_context_id: browsing_context_id, new_pipeline_id: pipeline_id, replace: None, new_browsing_context_info: Some(NewBrowsingContextInfo { parent_pipeline_id: None, is_private: is_private, is_visible: is_visible, }), window_size, }); } fn handle_close_top_level_browsing_context( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, ) { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); self.close_browsing_context(browsing_context_id, ExitPipelineMode::Normal); self.browsers.remove(&top_level_browsing_context_id); if self.active_browser_id == Some(top_level_browsing_context_id) { self.active_browser_id = None; } let browsing_context = match self.browsing_contexts.get(&browsing_context_id) { Some(bc) => bc, None => { warn!("BC has closed before it has started"); return; }, }; // https://html.spec.whatwg.org/multipage/#bcg-remove self.browsing_context_group_set .remove(&browsing_context.bc_group_id); } fn handle_iframe_size_msg(&mut self, iframe_sizes: Vec<IFrameSizeMsg>) { for IFrameSizeMsg { data, type_ } in iframe_sizes { let window_size = WindowSizeData { initial_viewport: data.size, device_pixel_ratio: self.window_size.device_pixel_ratio, }; self.resize_browsing_context(window_size, type_, data.id); } } fn handle_subframe_loaded(&mut self, pipeline_id: PipelineId) { let browsing_context_id = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.browsing_context_id, None => return warn!("Subframe {} loaded after closure.", pipeline_id), }; let parent_pipeline_id = match 
self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.parent_pipeline_id, None => { return warn!( "Subframe {} loaded in closed browsing context {}.", pipeline_id, browsing_context_id, ); }, }; let parent_pipeline_id = match parent_pipeline_id { Some(parent_pipeline_id) => parent_pipeline_id, None => return warn!("Subframe {} has no parent.", pipeline_id), }; // https://html.spec.whatwg.org/multipage/#the-iframe-element:completely-loaded // When a Document in an iframe is marked as completely loaded, // the user agent must run the iframe load event steps. let msg = ConstellationControlMsg::DispatchIFrameLoadEvent { target: browsing_context_id, parent: parent_pipeline_id, child: pipeline_id, }; let result = match self.pipelines.get(&parent_pipeline_id) { Some(parent) => parent.event_loop.send(msg), None => { return warn!( "Parent {} browsing context loaded after closure.", parent_pipeline_id ); }, }; if let Err(e) = result { self.handle_send_error(parent_pipeline_id, e); } } fn handle_navigate_request( &self, id: PipelineId, request_builder: RequestBuilder, cancel_chan: IpcReceiver<()>, ) { let listener = NetworkListener::new( request_builder, id, self.public_resource_threads.clone(), self.network_listener_sender.clone(), ); listener.initiate_fetch(Some(cancel_chan)); } // The script thread associated with pipeline_id has loaded a URL in an // iframe via script. This will result in a new pipeline being spawned and // a child being added to the parent browsing context. This message is never // the result of a page navigation. fn handle_script_loaded_url_in_iframe_msg(&mut self, load_info: IFrameLoadInfoWithData) { let IFrameLoadInfo { parent_pipeline_id, browsing_context_id, top_level_browsing_context_id, new_pipeline_id, is_private, mut replace, } = load_info.info; // If no url is specified, reload. let old_pipeline = load_info .old_pipeline_id .and_then(|id| self.pipelines.get(&id)); // Replacement enabled also takes into account whether the document is "completely loaded", // see https://html.spec.whatwg.org/multipage/#the-iframe-element:completely-loaded debug!("checking old pipeline? 
{:?}", load_info.old_pipeline_id); if let Some(old_pipeline) = old_pipeline { if !old_pipeline.completely_loaded { replace = HistoryEntryReplacement::Enabled; } debug!( "old pipeline is {}completely loaded", if old_pipeline.completely_loaded { "" } else { "not " } ); } let is_parent_private = { let parent_browsing_context_id = match self.pipelines.get(&parent_pipeline_id) { Some(pipeline) => pipeline.browsing_context_id, None => { return warn!( "Script loaded url in iframe {} in closed parent pipeline {}.", browsing_context_id, parent_pipeline_id, ); }, }; let is_parent_private = match self.browsing_contexts.get(&parent_browsing_context_id) { Some(ctx) => ctx.is_private, None => { return warn!( "Script loaded url in iframe {} in closed parent browsing context {}.", browsing_context_id, parent_browsing_context_id, ); }, }; is_parent_private }; let is_private = is_private || is_parent_private; let browsing_context = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ctx, None => { return warn!( "Script loaded url in iframe with closed browsing context {}.", browsing_context_id, ); }, }; let replace = match replace { HistoryEntryReplacement::Enabled => { Some(NeedsToReload::No(browsing_context.pipeline_id)) }, HistoryEntryReplacement::Disabled => None, }; // https://github.com/rust-lang/rust/issues/59159 let browsing_context_size = browsing_context.size; let browsing_context_is_visible = browsing_context.is_visible; debug_assert_eq!( browsing_context_size, load_info.window_size.initial_viewport ); // Create the new pipeline, attached to the parent and push to pending changes self.new_pipeline( new_pipeline_id, browsing_context_id, top_level_browsing_context_id, Some(parent_pipeline_id), None, browsing_context_size, load_info.load_data, load_info.sandbox, is_private, browsing_context_is_visible, ); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_browsing_context_id, browsing_context_id: browsing_context_id, new_pipeline_id: new_pipeline_id, replace: replace, // Browsing context for iframe already exists. new_browsing_context_info: None, window_size: load_info.window_size.initial_viewport, }); } fn handle_script_new_iframe( &mut self, load_info: IFrameLoadInfoWithData, layout_sender: IpcSender<LayoutControlMsg>, ) { let IFrameLoadInfo { parent_pipeline_id, new_pipeline_id, browsing_context_id, top_level_browsing_context_id, is_private, .. 
} = load_info.info; let (script_sender, parent_browsing_context_id) = match self.pipelines.get(&parent_pipeline_id) { Some(pipeline) => (pipeline.event_loop.clone(), pipeline.browsing_context_id), None => return warn!("Script loaded url in closed iframe {}.", parent_pipeline_id), }; let (is_parent_private, is_parent_visible) = match self.browsing_contexts.get(&parent_browsing_context_id) { Some(ctx) => (ctx.is_private, ctx.is_visible), None => { return warn!( "New iframe {} loaded in closed parent browsing context {}.", browsing_context_id, parent_browsing_context_id, ); }, }; let is_private = is_private || is_parent_private; let pipeline = Pipeline::new( new_pipeline_id, browsing_context_id, top_level_browsing_context_id, None, script_sender, layout_sender, self.compositor_proxy.clone(), is_parent_visible, load_info.load_data, ); assert!(!self.pipelines.contains_key(&new_pipeline_id)); self.pipelines.insert(new_pipeline_id, pipeline); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_browsing_context_id, browsing_context_id: browsing_context_id, new_pipeline_id: new_pipeline_id, replace: None, // Browsing context for iframe doesn't exist yet. new_browsing_context_info: Some(NewBrowsingContextInfo { parent_pipeline_id: Some(parent_pipeline_id), is_private: is_private, is_visible: is_parent_visible, }), window_size: load_info.window_size.initial_viewport, }); } fn handle_script_new_auxiliary( &mut self, load_info: AuxiliaryBrowsingContextLoadInfo, layout_sender: IpcSender<LayoutControlMsg>, ) { let AuxiliaryBrowsingContextLoadInfo { load_data, opener_pipeline_id, new_top_level_browsing_context_id, new_browsing_context_id, new_pipeline_id, } = load_info; let (script_sender, opener_browsing_context_id) = match self.pipelines.get(&opener_pipeline_id) { Some(pipeline) => (pipeline.event_loop.clone(), pipeline.browsing_context_id), None => { return warn!( "Auxiliary loaded url in closed iframe {}.", opener_pipeline_id ); }, }; let (is_opener_private, is_opener_visible) = match self.browsing_contexts.get(&opener_browsing_context_id) { Some(ctx) => (ctx.is_private, ctx.is_visible), None => { return warn!( "New auxiliary {} loaded in closed opener browsing context {}.", new_browsing_context_id, opener_browsing_context_id, ); }, }; let pipeline = Pipeline::new( new_pipeline_id, new_browsing_context_id, new_top_level_browsing_context_id, Some(opener_browsing_context_id), script_sender, layout_sender, self.compositor_proxy.clone(), is_opener_visible, load_data, ); assert!(!self.pipelines.contains_key(&new_pipeline_id)); self.pipelines.insert(new_pipeline_id, pipeline); self.browsers.insert( new_top_level_browsing_context_id, Browser { focused_browsing_context_id: new_browsing_context_id, session_history: JointSessionHistory::new(), }, ); // https://html.spec.whatwg.org/multipage/#bcg-append let opener = match self.browsing_contexts.get(&opener_browsing_context_id) { Some(id) => id, None => { warn!("Trying to append an unknown auxiliary to a BC group"); return; }, }; let bc_group = match self.browsing_context_group_set.get_mut(&opener.bc_group_id) { Some(bc_group) => bc_group, None => { warn!("Trying to add a top-level to an unknown group."); return; }, }; bc_group .top_level_browsing_context_set .insert(new_top_level_browsing_context_id.clone()); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: new_top_level_browsing_context_id, browsing_context_id: new_browsing_context_id, new_pipeline_id: new_pipeline_id, replace: None,
new_browsing_context_info: Some(NewBrowsingContextInfo { // Auxiliary browsing contexts are always top-level. parent_pipeline_id: None, is_private: is_opener_private, is_visible: is_opener_visible, }), window_size: self.window_size.initial_viewport, }); } fn handle_pending_paint_metric(&self, pipeline_id: PipelineId, epoch: Epoch) { self.compositor_proxy .send(ToCompositorMsg::PendingPaintMetric(pipeline_id, epoch)) } fn handle_set_cursor_msg(&mut self, cursor: Cursor) { self.embedder_proxy .send((None, EmbedderMsg::SetCursor(cursor))) } fn handle_change_running_animations_state( &mut self, pipeline_id: PipelineId, animation_state: AnimationState, ) { self.compositor_proxy .send(ToCompositorMsg::ChangeRunningAnimationsState( pipeline_id, animation_state, )) } fn handle_tick_animation(&mut self, pipeline_id: PipelineId, tick_type: AnimationTickType) { let result = match tick_type { AnimationTickType::Script => { let msg = ConstellationControlMsg::TickAllAnimations(pipeline_id); match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.send(msg), None => { return warn!("Pipeline {:?} got script tick after closure.", pipeline_id); }, } }, AnimationTickType::Layout => { let msg = LayoutControlMsg::TickAnimations; match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.layout_chan.send(msg), None => { return warn!("Pipeline {:?} got layout tick after closure.", pipeline_id); }, } }, }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } /// Schedule a navigation(via load_url). /// 1: Ask the embedder for permission. /// 2: Store the details of the navigation, pending approval from the embedder. fn schedule_navigation( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, source_id: PipelineId, load_data: LoadData, replace: HistoryEntryReplacement, ) { match self.pending_approval_navigations.entry(source_id) { Entry::Occupied(_) => { return warn!( "Pipeline {:?} tried to schedule a navigation while one is already pending.", source_id ); }, Entry::Vacant(entry) => { let _ = entry.insert((load_data.clone(), replace)); }, }; // Allow the embedder to handle the url itself let msg = ( Some(top_level_browsing_context_id), EmbedderMsg::AllowNavigationRequest(source_id, load_data.url.clone()), ); self.embedder_proxy.send(msg); } fn load_url( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, source_id: PipelineId, load_data: LoadData, replace: HistoryEntryReplacement, ) -> Option<PipelineId> { let replace_debug = match replace { HistoryEntryReplacement::Enabled => "", HistoryEntryReplacement::Disabled => "not", }; debug!( "Loading {} in pipeline {}, {}replacing.", load_data.url, source_id, replace_debug ); // If this load targets an iframe, its framing element may exist // in a separate script thread than the framed document that initiated // the new load. The framing element must be notified about the // requested change so it can update its internal state. // // If replace is true, the current entry is replaced instead of a new entry being added. 
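// Loads that target an iframe are forwarded to the parent pipeline's script thread as a NavigateIframe message; top-level loads spawn a replacement pipeline here and queue a SessionHistoryChange.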
let (browsing_context_id, opener) = match self.pipelines.get(&source_id) { Some(pipeline) => (pipeline.browsing_context_id, pipeline.opener), None => { warn!("Pipeline {} loaded after closure.", source_id); return None; }, }; let (window_size, pipeline_id, parent_pipeline_id, is_private, is_visible) = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ( ctx.size, ctx.pipeline_id, ctx.parent_pipeline_id, ctx.is_private, ctx.is_visible, ), None => { // This should technically never happen (since `load_url` is // only called on existing browsing contexts), but we prefer to // avoid `expect`s or `unwrap`s in `Constellation` to ward // against future changes that might break things. warn!( "Pipeline {} loaded url in closed browsing context {}.", source_id, browsing_context_id, ); return None; }, }; match parent_pipeline_id { Some(parent_pipeline_id) => { // Find the script thread for the pipeline containing the iframe // and issue an iframe load through there. let msg = ConstellationControlMsg::NavigateIframe( parent_pipeline_id, browsing_context_id, load_data, replace, ); let result = match self.pipelines.get(&parent_pipeline_id) { Some(parent_pipeline) => parent_pipeline.event_loop.send(msg), None => { warn!( "Pipeline {:?} child loaded after closure", parent_pipeline_id ); return None; }, }; if let Err(e) = result { self.handle_send_error(parent_pipeline_id, e); } None }, None => { // Make sure no pending page would be overridden. for change in &self.pending_changes { if change.browsing_context_id == browsing_context_id { // id that sent load msg is being changed already; abort return None; } } if self.get_activity(source_id) == DocumentActivity::Inactive { // Disregard this load if the navigating pipeline is not actually // active. This could be caused by a delayed navigation (eg. from // a timer) or a race between multiple navigations (such as an // onclick handler on an anchor element). return None; } // Being here means either there are no pending changes, or none of the pending // changes would be overridden by changing the subframe associated with source_id. // Create the new pipeline let replace = match replace { HistoryEntryReplacement::Enabled => Some(NeedsToReload::No(pipeline_id)), HistoryEntryReplacement::Disabled => None, }; let new_pipeline_id = PipelineId::new(); let sandbox = IFrameSandboxState::IFrameUnsandboxed; self.new_pipeline( new_pipeline_id, browsing_context_id, top_level_browsing_context_id, None, opener, window_size, load_data, sandbox, is_private, is_visible, ); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_browsing_context_id, browsing_context_id: browsing_context_id, new_pipeline_id: new_pipeline_id, replace, // `load_url` is always invoked on an existing browsing context. new_browsing_context_info: None, window_size, }); Some(new_pipeline_id) }, } } fn handle_abort_load_url_msg(&mut self, new_pipeline_id: PipelineId) { let pending_index = self .pending_changes .iter() .rposition(|change| change.new_pipeline_id == new_pipeline_id); // If it is found, remove it from the pending changes. 
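// The aborted pipeline is closed as well, since it can no longer become the current entry of its browsing context.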
if let Some(pending_index) = pending_index { self.pending_changes.remove(pending_index); self.close_pipeline( new_pipeline_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal, ); } } fn handle_load_start_msg( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, browsing_context_id: BrowsingContextId, ) { if browsing_context_id == top_level_browsing_context_id { // Notify embedder top level document started loading. self.embedder_proxy .send((Some(top_level_browsing_context_id), EmbedderMsg::LoadStart)); } } fn handle_load_complete_msg( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, pipeline_id: PipelineId, ) { let mut webdriver_reset = false; if let Some((expected_pipeline_id, ref reply_chan)) = self.webdriver.load_channel { debug!("Sending load to WebDriver"); if expected_pipeline_id == pipeline_id { let _ = reply_chan.send(webdriver_msg::LoadStatus::LoadComplete); webdriver_reset = true; } } if webdriver_reset { self.webdriver.load_channel = None; } if let Some(pipeline) = self.pipelines.get_mut(&pipeline_id) { debug!("marking pipeline {:?} as loaded", pipeline_id); pipeline.completely_loaded = true; } // Notify the embedder that the TopLevelBrowsingContext current document // has finished loading. // We need to make sure the pipeline that has finished loading is the current // pipeline and that no pending pipeline will replace the current one. let pipeline_is_top_level_pipeline = self .browsing_contexts .get(&BrowsingContextId::from(top_level_browsing_context_id)) .map(|ctx| ctx.pipeline_id == pipeline_id) .unwrap_or(false); if pipeline_is_top_level_pipeline { // Is there any pending pipeline that will replace the current top level pipeline let current_top_level_pipeline_will_be_replaced = self .pending_changes .iter() .any(|change| change.browsing_context_id == top_level_browsing_context_id); if !current_top_level_pipeline_will_be_replaced { // Notify embedder and compositor top level document finished loading. 
self.compositor_proxy .send(ToCompositorMsg::LoadComplete(top_level_browsing_context_id)); self.embedder_proxy.send(( Some(top_level_browsing_context_id), EmbedderMsg::LoadComplete, )); } } else { self.handle_subframe_loaded(pipeline_id); } } fn handle_navigated_to_fragment( &mut self, pipeline_id: PipelineId, new_url: ServoUrl, replacement_enabled: HistoryEntryReplacement, ) { let (top_level_browsing_context_id, old_url) = match self.pipelines.get_mut(&pipeline_id) { Some(pipeline) => { let old_url = replace(&mut pipeline.url, new_url.clone()); (pipeline.top_level_browsing_context_id, old_url) }, None => { return warn!( "Pipeline {} navigated to fragment after closure", pipeline_id ); }, }; match replacement_enabled { HistoryEntryReplacement::Disabled => { let diff = SessionHistoryDiff::HashDiff { pipeline_reloader: NeedsToReload::No(pipeline_id), new_url, old_url, }; self.get_joint_session_history(top_level_browsing_context_id) .push_diff(diff); self.notify_history_changed(top_level_browsing_context_id); }, HistoryEntryReplacement::Enabled => {}, } } fn handle_traverse_history_msg( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, direction: TraversalDirection, ) { let mut browsing_context_changes = HashMap::<BrowsingContextId, NeedsToReload>::new(); let mut pipeline_changes = HashMap::<PipelineId, (Option<HistoryStateId>, ServoUrl)>::new(); let mut url_to_load = HashMap::<PipelineId, ServoUrl>::new(); { let session_history = self.get_joint_session_history(top_level_browsing_context_id); match direction { TraversalDirection::Forward(forward) => { let future_length = session_history.future.len(); if future_length < forward { return warn!("Cannot traverse that far into the future."); } for diff in session_history .future .drain(future_length - forward..) .rev() { match diff { SessionHistoryDiff::BrowsingContextDiff { browsing_context_id, ref new_reloader, .. } => { browsing_context_changes .insert(browsing_context_id, new_reloader.clone()); }, SessionHistoryDiff::PipelineDiff { ref pipeline_reloader, new_history_state_id, ref new_url, .. } => match *pipeline_reloader { NeedsToReload::No(pipeline_id) => { pipeline_changes.insert( pipeline_id, (Some(new_history_state_id), new_url.clone()), ); }, NeedsToReload::Yes(pipeline_id, ..) => { url_to_load.insert(pipeline_id, new_url.clone()); }, }, SessionHistoryDiff::HashDiff { ref pipeline_reloader, ref new_url, .. } => match *pipeline_reloader { NeedsToReload::No(pipeline_id) => { let state = pipeline_changes .get(&pipeline_id) .and_then(|change| change.0); pipeline_changes.insert(pipeline_id, (state, new_url.clone())); }, NeedsToReload::Yes(pipeline_id, ..) => { url_to_load.insert(pipeline_id, new_url.clone()); }, }, } session_history.past.push(diff); } }, TraversalDirection::Back(back) => { let past_length = session_history.past.len(); if past_length < back { return warn!("Cannot traverse that far into the past."); } for diff in session_history.past.drain(past_length - back..).rev() { match diff { SessionHistoryDiff::BrowsingContextDiff { browsing_context_id, ref old_reloader, .. } => { browsing_context_changes .insert(browsing_context_id, old_reloader.clone()); }, SessionHistoryDiff::PipelineDiff { ref pipeline_reloader, old_history_state_id, ref old_url, .. } => match *pipeline_reloader { NeedsToReload::No(pipeline_id) => { pipeline_changes.insert( pipeline_id, (old_history_state_id, old_url.clone()), ); }, NeedsToReload::Yes(pipeline_id, ..) 
=> { url_to_load.insert(pipeline_id, old_url.clone()); }, }, SessionHistoryDiff::HashDiff { ref pipeline_reloader, ref old_url, .. } => match *pipeline_reloader { NeedsToReload::No(pipeline_id) => { let state = pipeline_changes .get(&pipeline_id) .and_then(|change| change.0); pipeline_changes.insert(pipeline_id, (state, old_url.clone())); }, NeedsToReload::Yes(pipeline_id, ..) => { url_to_load.insert(pipeline_id, old_url.clone()); }, }, } session_history.future.push(diff); } }, } } for (browsing_context_id, mut pipeline_reloader) in browsing_context_changes.drain() { if let NeedsToReload::Yes(pipeline_id, ref mut load_data) = pipeline_reloader { if let Some(url) = url_to_load.get(&pipeline_id) { load_data.url = url.clone(); } } self.update_browsing_context(browsing_context_id, pipeline_reloader); } for (pipeline_id, (history_state_id, url)) in pipeline_changes.drain() { self.update_pipeline(pipeline_id, history_state_id, url); } self.notify_history_changed(top_level_browsing_context_id); self.trim_history(top_level_browsing_context_id); self.update_frame_tree_if_active(top_level_browsing_context_id); } fn update_browsing_context( &mut self, browsing_context_id: BrowsingContextId, new_reloader: NeedsToReload, ) { let new_pipeline_id = match new_reloader { NeedsToReload::No(pipeline_id) => pipeline_id, NeedsToReload::Yes(pipeline_id, load_data) => { debug!( "Reloading document {} in browsing context {}.", pipeline_id, browsing_context_id ); // TODO: Save the sandbox state so it can be restored here. let sandbox = IFrameSandboxState::IFrameUnsandboxed; let ( top_level_id, old_pipeline_id, parent_pipeline_id, window_size, is_private, is_visible, ) = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ( ctx.top_level_id, ctx.pipeline_id, ctx.parent_pipeline_id, ctx.size, ctx.is_private, ctx.is_visible, ), None => return warn!("No browsing context to traverse!"), }; let opener = match self.pipelines.get(&old_pipeline_id) { Some(pipeline) => pipeline.opener, None => None, }; let new_pipeline_id = PipelineId::new(); self.new_pipeline( new_pipeline_id, browsing_context_id, top_level_id, parent_pipeline_id, opener, window_size, load_data.clone(), sandbox, is_private, is_visible, ); self.add_pending_change(SessionHistoryChange { top_level_browsing_context_id: top_level_id, browsing_context_id: browsing_context_id, new_pipeline_id: new_pipeline_id, replace: Some(NeedsToReload::Yes(pipeline_id, load_data)), // Browsing context must exist at this point. 
new_browsing_context_info: None, window_size, }); return; }, }; let (old_pipeline_id, parent_pipeline_id, top_level_id) = match self.browsing_contexts.get_mut(&browsing_context_id) { Some(browsing_context) => { let old_pipeline_id = browsing_context.pipeline_id; browsing_context.update_current_entry(new_pipeline_id); ( old_pipeline_id, browsing_context.parent_pipeline_id, browsing_context.top_level_id, ) }, None => { return warn!( "Browsing context {} was closed during traversal", browsing_context_id ); }, }; if let Some(old_pipeline) = self.pipelines.get(&old_pipeline_id) { old_pipeline.notify_visibility(false); } if let Some(new_pipeline) = self.pipelines.get(&new_pipeline_id) { new_pipeline.notify_visibility(true); } self.update_activity(old_pipeline_id); self.update_activity(new_pipeline_id); if let Some(parent_pipeline_id) = parent_pipeline_id { let msg = ConstellationControlMsg::UpdatePipelineId( parent_pipeline_id, browsing_context_id, top_level_id, new_pipeline_id, UpdatePipelineIdReason::Traversal, ); let result = match self.pipelines.get(&parent_pipeline_id) { None => { return warn!( "Pipeline {} child traversed after closure", parent_pipeline_id ); }, Some(pipeline) => pipeline.event_loop.send(msg), }; if let Err(e) = result { self.handle_send_error(parent_pipeline_id, e); } } } fn update_pipeline( &mut self, pipeline_id: PipelineId, history_state_id: Option<HistoryStateId>, url: ServoUrl, ) { let result = match self.pipelines.get_mut(&pipeline_id) { None => { return warn!( "Pipeline {} history state updated after closure", pipeline_id ); }, Some(pipeline) => { let msg = ConstellationControlMsg::UpdateHistoryState( pipeline_id, history_state_id, url.clone(), ); pipeline.history_state_id = history_state_id; pipeline.url = url; pipeline.event_loop.send(msg) }, }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } fn handle_joint_session_history_length( &self, top_level_browsing_context_id: TopLevelBrowsingContextId, sender: IpcSender<u32>, ) { let length = self .browsers .get(&top_level_browsing_context_id) .map(|browser| browser.session_history.history_length()) .unwrap_or(1); let _ = sender.send(length as u32); } fn handle_push_history_state_msg( &mut self, pipeline_id: PipelineId, history_state_id: HistoryStateId, url: ServoUrl, ) { let (top_level_browsing_context_id, old_state_id, old_url) = match self.pipelines.get_mut(&pipeline_id) { Some(pipeline) => { let old_history_state_id = pipeline.history_state_id; let old_url = replace(&mut pipeline.url, url.clone()); pipeline.history_state_id = Some(history_state_id); pipeline.history_states.insert(history_state_id); ( pipeline.top_level_browsing_context_id, old_history_state_id, old_url, ) }, None => { return warn!( "Push history state {} for closed pipeline {}", history_state_id, pipeline_id ); }, }; let diff = SessionHistoryDiff::PipelineDiff { pipeline_reloader: NeedsToReload::No(pipeline_id), new_history_state_id: history_state_id, new_url: url, old_history_state_id: old_state_id, old_url: old_url, }; self.get_joint_session_history(top_level_browsing_context_id) .push_diff(diff); self.notify_history_changed(top_level_browsing_context_id); } fn handle_replace_history_state_msg( &mut self, pipeline_id: PipelineId, history_state_id: HistoryStateId, url: ServoUrl, ) { let top_level_browsing_context_id = match self.pipelines.get_mut(&pipeline_id) { Some(pipeline) => { pipeline.history_state_id = Some(history_state_id); pipeline.url = url.clone(); pipeline.top_level_browsing_context_id }, None => { return warn!( 
"Replace history state {} for closed pipeline {}", history_state_id, pipeline_id ); }, }; let session_history = self.get_joint_session_history(top_level_browsing_context_id); session_history.replace_history_state(pipeline_id, history_state_id, url); } fn handle_key_msg(&mut self, event: KeyboardEvent) { // Send to the focused browsing contexts' current pipeline. If it // doesn't exist, fall back to sending to the compositor. let focused_browsing_context_id = self .active_browser_id .and_then(|browser_id| self.browsers.get(&browser_id)) .map(|browser| browser.focused_browsing_context_id); match focused_browsing_context_id { Some(browsing_context_id) => { let event = CompositorEvent::KeyboardEvent(event); let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ctx.pipeline_id, None => { return warn!( "Got key event for nonexistent browsing context {}.", browsing_context_id, ); }, }; let msg = ConstellationControlMsg::SendEvent(pipeline_id, event); let result = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.send(msg), None => { return debug!("Pipeline {:?} got key event after closure.", pipeline_id); }, }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } }, None => { let event = (None, EmbedderMsg::Keyboard(event)); self.embedder_proxy.send(event); }, } } fn handle_reload_msg(&mut self, top_level_browsing_context_id: TopLevelBrowsingContextId) { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Browsing context {} got reload event after closure.", browsing_context_id ); }, }; let msg = ConstellationControlMsg::Reload(pipeline_id); let result = match self.pipelines.get(&pipeline_id) { None => return warn!("Pipeline {} got reload event after closure.", pipeline_id), Some(pipeline) => pipeline.event_loop.send(msg), }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } fn handle_post_message_msg( &mut self, browsing_context_id: BrowsingContextId, source_pipeline: PipelineId, origin: Option<ImmutableOrigin>, source_origin: ImmutableOrigin, data: StructuredSerializedData, ) { let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { None => { return warn!( "PostMessage to closed browsing_context {}.", browsing_context_id ); }, Some(browsing_context) => browsing_context.pipeline_id, }; let source_browsing_context = match self.pipelines.get(&source_pipeline) { Some(pipeline) => pipeline.top_level_browsing_context_id, None => return warn!("PostMessage from closed pipeline {:?}", source_pipeline), }; let msg = ConstellationControlMsg::PostMessage { target: pipeline_id, source: source_pipeline, source_browsing_context: source_browsing_context, target_origin: origin, source_origin, data, }; let result = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.send(msg), None => return warn!("postMessage to closed pipeline {}.", pipeline_id), }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } fn handle_get_pipeline( &mut self, browsing_context_id: BrowsingContextId, resp_chan: IpcSender<Option<PipelineId>>, ) { let current_pipeline_id = self .browsing_contexts .get(&browsing_context_id) .map(|browsing_context| browsing_context.pipeline_id); let pipeline_id_loaded = self .pending_changes .iter() .rev() .find(|x| x.browsing_context_id == browsing_context_id) 
.map(|x| x.new_pipeline_id) .or(current_pipeline_id); if let Err(e) = resp_chan.send(pipeline_id_loaded) { warn!("Failed get_pipeline response ({}).", e); } } fn handle_get_browsing_context( &mut self, pipeline_id: PipelineId, resp_chan: IpcSender<Option<BrowsingContextId>>, ) { let browsing_context_id = self .pipelines .get(&pipeline_id) .map(|pipeline| pipeline.browsing_context_id); if let Err(e) = resp_chan.send(browsing_context_id) { warn!("Failed get_browsing_context response ({}).", e); } } fn handle_focus_msg(&mut self, pipeline_id: PipelineId) { let (browsing_context_id, top_level_browsing_context_id) = match self.pipelines.get(&pipeline_id) { Some(pipeline) => ( pipeline.browsing_context_id, pipeline.top_level_browsing_context_id, ), None => return warn!("Pipeline {:?} focus parent after closure.", pipeline_id), }; // Update the focused browsing context in its browser in `browsers`. match self.browsers.get_mut(&top_level_browsing_context_id) { Some(browser) => { browser.focused_browsing_context_id = browsing_context_id; }, None => { return warn!( "Browser {} for focus msg does not exist", top_level_browsing_context_id ); }, }; // Focus parent iframes recursively self.focus_parent_pipeline(browsing_context_id); } fn focus_parent_pipeline(&mut self, browsing_context_id: BrowsingContextId) { let parent_pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ctx.parent_pipeline_id, None => { return warn!( "Browsing context {:?} focus parent after closure.", browsing_context_id ); }, }; let parent_pipeline_id = match parent_pipeline_id { Some(parent_id) => parent_id, None => { return debug!( "Browsing context {:?} focus has no parent.", browsing_context_id ); }, }; // Send a message to the parent of the provided browsing context (if it // exists) telling it to mark the iframe element as focused. 
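// Focusing walks up through nested iframes: after notifying this parent, focus_parent_pipeline recurses on the parent's own browsing context until it reaches the top.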
let msg = ConstellationControlMsg::FocusIFrame(parent_pipeline_id, browsing_context_id); let (result, parent_browsing_context_id) = match self.pipelines.get(&parent_pipeline_id) { Some(pipeline) => { let result = pipeline.event_loop.send(msg); (result, pipeline.browsing_context_id) }, None => return warn!("Pipeline {:?} focus after closure.", parent_pipeline_id), }; if let Err(e) = result { self.handle_send_error(parent_pipeline_id, e); } self.focus_parent_pipeline(parent_browsing_context_id); } fn handle_remove_iframe_msg( &mut self, browsing_context_id: BrowsingContextId, ) -> Vec<PipelineId> { let result = self .all_descendant_browsing_contexts_iter(browsing_context_id) .flat_map(|browsing_context| browsing_context.pipelines.iter().cloned()) .collect(); self.close_browsing_context(browsing_context_id, ExitPipelineMode::Normal); result } fn handle_visibility_change_complete(&mut self, pipeline_id: PipelineId, visibility: bool) { let browsing_context_id = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.browsing_context_id, None => return warn!("Visibility change for closed pipeline {:?}.", pipeline_id), }; let parent_pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(ctx) => ctx.parent_pipeline_id, None => { return warn!( "Visibility change for closed browsing context {:?}.", pipeline_id ); }, }; if let Some(parent_pipeline_id) = parent_pipeline_id { let visibility_msg = ConstellationControlMsg::NotifyVisibilityChange( parent_pipeline_id, browsing_context_id, visibility, ); let result = match self.pipelines.get(&parent_pipeline_id) { None => return warn!("Parent pipeline {:?} closed", parent_pipeline_id), Some(parent_pipeline) => parent_pipeline.event_loop.send(visibility_msg), }; if let Err(e) = result { self.handle_send_error(parent_pipeline_id, e); } } } fn handle_create_canvas_paint_thread_msg( &mut self, size: UntypedSize2D<u64>, response_sender: IpcSender<(IpcSender<CanvasMsg>, CanvasId)>, ) { let webrender_api = self.webrender_api_sender.clone(); let sender = self.canvas_chan.clone(); let (canvas_id_sender, canvas_id_receiver) = ipc::channel::<CanvasId>().expect("ipc channel failure"); <|fim▁hole|> size, webrender_api, self.enable_canvas_antialiasing, )) { return warn!("Create canvas paint thread failed ({})", e); } let canvas_id = match canvas_id_receiver.recv() { Ok(canvas_id) => canvas_id, Err(e) => return warn!("Create canvas paint thread id response failed ({})", e), }; if let Err(e) = response_sender.send((sender, canvas_id.clone())) { warn!("Create canvas paint thread response failed ({})", e); } } fn handle_webdriver_msg(&mut self, msg: WebDriverCommandMsg) { // Find the script channel for the given parent pipeline, // and pass the event to that script thread.
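// Resize and screenshot requests go through the embedder/compositor; load, refresh, script, and keyboard commands are resolved to the target pipeline's event loop, and mouse actions are forwarded to the compositor.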
match msg { WebDriverCommandMsg::GetWindowSize(_, reply) => { let _ = reply.send(self.window_size); }, WebDriverCommandMsg::SetWindowSize(top_level_browsing_context_id, size, reply) => { self.webdriver.resize_channel = Some(reply); self.embedder_proxy.send(( Some(top_level_browsing_context_id), EmbedderMsg::ResizeTo(size), )); }, WebDriverCommandMsg::LoadUrl(top_level_browsing_context_id, load_data, reply) => { self.load_url_for_webdriver( top_level_browsing_context_id, load_data, reply, HistoryEntryReplacement::Disabled, ); }, WebDriverCommandMsg::Refresh(top_level_browsing_context_id, reply) => { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Browsing context {} Refresh after closure.", browsing_context_id ); }, }; let load_data = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.load_data.clone(), None => return warn!("Pipeline {} refresh after closure.", pipeline_id), }; self.load_url_for_webdriver( top_level_browsing_context_id, load_data, reply, HistoryEntryReplacement::Enabled, ); }, WebDriverCommandMsg::ScriptCommand(browsing_context_id, cmd) => { let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Browsing context {} ScriptCommand after closure.", browsing_context_id ); }, }; let control_msg = ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, cmd); let result = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.send(control_msg), None => { return warn!("Pipeline {:?} ScriptCommand after closure.", pipeline_id) }, }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } }, WebDriverCommandMsg::SendKeys(browsing_context_id, cmd) => { let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Browsing context {} SendKeys after closure.", browsing_context_id ); }, }; let event_loop = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.clone(), None => return warn!("Pipeline {} SendKeys after closure.", pipeline_id), }; for event in cmd { let event = match event { WebDriverInputEvent::Keyboard(event) => { CompositorEvent::KeyboardEvent(event) }, WebDriverInputEvent::Composition(event) => { CompositorEvent::CompositionEvent(event) }, }; let control_msg = ConstellationControlMsg::SendEvent(pipeline_id, event); if let Err(e) = event_loop.send(control_msg) { return self.handle_send_error(pipeline_id, e); } } }, WebDriverCommandMsg::KeyboardAction(browsing_context_id, event) => { let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Browsing context {} KeyboardAction after closure.", browsing_context_id ); }, }; let event_loop = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.event_loop.clone(), None => return warn!("Pipeline {} KeyboardAction after closure.", pipeline_id), }; let control_msg = ConstellationControlMsg::SendEvent( pipeline_id, CompositorEvent::KeyboardEvent(event), ); if let Err(e) = event_loop.send(control_msg) { return self.handle_send_error(pipeline_id, e); } }, WebDriverCommandMsg::MouseButtonAction(mouse_event_type, mouse_button, x, y) => { self.compositor_proxy 
.send(ToCompositorMsg::WebDriverMouseButtonEvent( mouse_event_type, mouse_button, x, y, )); }, WebDriverCommandMsg::MouseMoveAction(x, y) => { self.compositor_proxy .send(ToCompositorMsg::WebDriverMouseMoveEvent(x, y)); }, WebDriverCommandMsg::TakeScreenshot(_, rect, reply) => { self.compositor_proxy .send(ToCompositorMsg::CreatePng(rect, reply)); }, } } fn notify_history_changed(&self, top_level_browsing_context_id: TopLevelBrowsingContextId) { // Send a flat projection of the history to embedder. // The final vector is a concatenation of the LoadData of the past // entries, the current entry and the future entries. // LoadData of inner frames are ignored and replaced with the LoadData // of the parent. let session_history = match self.browsers.get(&top_level_browsing_context_id) { Some(browser) => &browser.session_history, None => { return warn!( "Session history does not exist for {}", top_level_browsing_context_id ); }, }; let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); let browsing_context = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context, None => { return warn!( "notify_history_changed error after top-level browsing context closed." ); }, }; let current_load_data = match self.pipelines.get(&browsing_context.pipeline_id) { Some(pipeline) => pipeline.load_data.clone(), None => { return warn!( "Pipeline {} refresh after closure.", browsing_context.pipeline_id ); }, }; // If LoadData was ignored, use the LoadData of the previous SessionHistoryEntry, which // is the LoadData of the parent browsing context. let resolve_load_data_future = |previous_load_data: &mut LoadData, diff: &SessionHistoryDiff| match *diff { SessionHistoryDiff::BrowsingContextDiff { browsing_context_id, ref new_reloader, .. } => { if browsing_context_id == top_level_browsing_context_id { let load_data = match *new_reloader { NeedsToReload::No(pipeline_id) => { match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.load_data.clone(), None => previous_load_data.clone(), } }, NeedsToReload::Yes(_, ref load_data) => load_data.clone(), }; *previous_load_data = load_data.clone(); Some(load_data) } else { Some(previous_load_data.clone()) } }, _ => Some(previous_load_data.clone()), }; let resolve_load_data_past = |previous_load_data: &mut LoadData, diff: &SessionHistoryDiff| match *diff { SessionHistoryDiff::BrowsingContextDiff { browsing_context_id, ref old_reloader, .. 
} => { if browsing_context_id == top_level_browsing_context_id { let load_data = match *old_reloader { NeedsToReload::No(pipeline_id) => { match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.load_data.clone(), None => previous_load_data.clone(), } }, NeedsToReload::Yes(_, ref load_data) => load_data.clone(), }; *previous_load_data = load_data.clone(); Some(load_data) } else { Some(previous_load_data.clone()) } }, _ => Some(previous_load_data.clone()), }; let mut entries: Vec<LoadData> = session_history .past .iter() .rev() .scan(current_load_data.clone(), &resolve_load_data_past) .collect(); entries.reverse(); let current_index = entries.len(); entries.push(current_load_data.clone()); entries.extend( session_history .future .iter() .rev() .scan(current_load_data, &resolve_load_data_future), ); let urls = entries.iter().map(|entry| entry.url.clone()).collect(); let msg = ( Some(top_level_browsing_context_id), EmbedderMsg::HistoryChanged(urls, current_index), ); self.embedder_proxy.send(msg); } fn load_url_for_webdriver( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, load_data: LoadData, reply: IpcSender<webdriver_msg::LoadStatus>, replace: HistoryEntryReplacement, ) { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); let pipeline_id = match self.browsing_contexts.get(&browsing_context_id) { Some(browsing_context) => browsing_context.pipeline_id, None => { return warn!( "Webdriver load for closed browsing context {}.", browsing_context_id ); }, }; if let Some(new_pipeline_id) = self.load_url( top_level_browsing_context_id, pipeline_id, load_data, replace, ) { self.webdriver.load_channel = Some((new_pipeline_id, reply)); } } fn change_session_history(&mut self, change: SessionHistoryChange) { debug!( "Setting browsing context {} to be pipeline {}.", change.browsing_context_id, change.new_pipeline_id ); // If the currently focused browsing context is a child of the browsing // context in which the page is being loaded, then update the focused // browsing context to be the one where the page is being loaded. 
if self.focused_browsing_context_is_descendant_of(change.browsing_context_id) { self.browsers .entry(change.top_level_browsing_context_id) .and_modify(|browser| { browser.focused_browsing_context_id = change.browsing_context_id }); } let (old_pipeline_id, top_level_id) = match self.browsing_contexts.get_mut(&change.browsing_context_id) { Some(browsing_context) => { debug!("Adding pipeline to existing browsing context."); let old_pipeline_id = browsing_context.pipeline_id; browsing_context.pipelines.insert(change.new_pipeline_id); browsing_context.update_current_entry(change.new_pipeline_id); (Some(old_pipeline_id), Some(browsing_context.top_level_id)) }, None => { debug!("Adding pipeline to new browsing context."); (None, None) }, }; match old_pipeline_id { None => { let new_context_info = match change.new_browsing_context_info { Some(info) => info, None => { return warn!( "No NewBrowsingContextInfo for browsing context {}", change.browsing_context_id, ); }, }; self.new_browsing_context( change.browsing_context_id, change.top_level_browsing_context_id, change.new_pipeline_id, new_context_info.parent_pipeline_id, change.window_size, new_context_info.is_private, new_context_info.is_visible, ); self.update_activity(change.new_pipeline_id); }, Some(old_pipeline_id) => { if let Some(pipeline) = self.pipelines.get(&old_pipeline_id) { pipeline.notify_visibility(false); } // https://html.spec.whatwg.org/multipage/#unload-a-document self.unload_document(old_pipeline_id); // Deactivate the old pipeline, and activate the new one. let (pipelines_to_close, states_to_close) = if let Some(replace_reloader) = change.replace { self.get_joint_session_history(change.top_level_browsing_context_id) .replace_reloader( replace_reloader.clone(), NeedsToReload::No(change.new_pipeline_id), ); match replace_reloader { NeedsToReload::No(pipeline_id) => (Some(vec![pipeline_id]), None), NeedsToReload::Yes(..) => (None, None), } } else { let diff = SessionHistoryDiff::BrowsingContextDiff { browsing_context_id: change.browsing_context_id, new_reloader: NeedsToReload::No(change.new_pipeline_id), old_reloader: NeedsToReload::No(old_pipeline_id), }; let mut pipelines_to_close = vec![]; let mut states_to_close = HashMap::new(); let diffs_to_close = self .get_joint_session_history(change.top_level_browsing_context_id) .push_diff(diff); for diff in diffs_to_close { match diff { SessionHistoryDiff::BrowsingContextDiff { new_reloader, .. } => { if let Some(pipeline_id) = new_reloader.alive_pipeline_id() { pipelines_to_close.push(pipeline_id); } }, SessionHistoryDiff::PipelineDiff { pipeline_reloader, new_history_state_id, .. 
} => { if let Some(pipeline_id) = pipeline_reloader.alive_pipeline_id() { let states = states_to_close.entry(pipeline_id).or_insert(Vec::new()); states.push(new_history_state_id); } }, _ => {}, } } (Some(pipelines_to_close), Some(states_to_close)) }; self.update_activity(old_pipeline_id); self.update_activity(change.new_pipeline_id); if let Some(states_to_close) = states_to_close { for (pipeline_id, states) in states_to_close { let msg = ConstellationControlMsg::RemoveHistoryStates(pipeline_id, states); let result = match self.pipelines.get(&pipeline_id) { None => { return warn!( "Pipeline {} removed history states after closure", pipeline_id ); }, Some(pipeline) => pipeline.event_loop.send(msg), }; if let Err(e) = result { self.handle_send_error(pipeline_id, e); } } } if let Some(pipelines_to_close) = pipelines_to_close { for pipeline_id in pipelines_to_close { self.close_pipeline( pipeline_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal, ); } } }, } if let Some(top_level_id) = top_level_id { self.trim_history(top_level_id); } self.notify_history_changed(change.top_level_browsing_context_id); self.update_frame_tree_if_active(change.top_level_browsing_context_id); } fn focused_browsing_context_is_descendant_of( &self, browsing_context_id: BrowsingContextId, ) -> bool { let focused_browsing_context_id = self .active_browser_id .and_then(|browser_id| self.browsers.get(&browser_id)) .map(|browser| browser.focused_browsing_context_id); focused_browsing_context_id.map_or(false, |focus_ctx_id| { focus_ctx_id == browsing_context_id || self.fully_active_descendant_browsing_contexts_iter(browsing_context_id) .any(|nested_ctx| nested_ctx.id == focus_ctx_id) }) } fn trim_history(&mut self, top_level_browsing_context_id: TopLevelBrowsingContextId) { let pipelines_to_evict = { let session_history = self.get_joint_session_history(top_level_browsing_context_id); let history_length = pref!(session_history.max_length) as usize; // The past is stored with older entries at the front. // We reverse the iter so that newer entries are at the front and then // skip _n_ entries and evict the remaining entries. let mut pipelines_to_evict = session_history .past .iter() .rev() .map(|diff| diff.alive_old_pipeline()) .skip(history_length) .filter_map(|maybe_pipeline| maybe_pipeline) .collect::<Vec<_>>(); // The future is stored with oldest entries front, so we must // reverse the iterator like we do for the `past`. pipelines_to_evict.extend( session_history .future .iter() .rev() .map(|diff| diff.alive_new_pipeline()) .skip(history_length) .filter_map(|maybe_pipeline| maybe_pipeline), ); pipelines_to_evict }; let mut dead_pipelines = vec![]; for evicted_id in pipelines_to_evict { let load_data = match self.pipelines.get(&evicted_id) { Some(pipeline) => { let mut load_data = pipeline.load_data.clone(); load_data.url = pipeline.url.clone(); load_data }, None => continue, }; dead_pipelines.push((evicted_id, NeedsToReload::Yes(evicted_id, load_data))); self.close_pipeline( evicted_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal, ); } let session_history = self.get_joint_session_history(top_level_browsing_context_id); for (alive_id, dead) in dead_pipelines { session_history.replace_reloader(NeedsToReload::No(alive_id), dead); } } fn handle_activate_document_msg(&mut self, pipeline_id: PipelineId) { debug!("Document ready to activate {}", pipeline_id); // Find the pending change whose new pipeline id is pipeline_id. 
let pending_index = self .pending_changes .iter() .rposition(|change| change.new_pipeline_id == pipeline_id); // If it is found, remove it from the pending changes, and make it // the active document of its frame. if let Some(pending_index) = pending_index { let change = self.pending_changes.swap_remove(pending_index); // Notify the parent (if there is one). let parent_pipeline_id = match change.new_browsing_context_info { // This will be a new browsing context. Some(ref info) => info.parent_pipeline_id, // This is an existing browsing context. None => match self.browsing_contexts.get(&change.browsing_context_id) { Some(ctx) => ctx.parent_pipeline_id, None => { return warn!( "Activated document {} after browsing context {} closure.", change.new_pipeline_id, change.browsing_context_id, ); }, }, }; if let Some(parent_pipeline_id) = parent_pipeline_id { if let Some(parent_pipeline) = self.pipelines.get(&parent_pipeline_id) { let msg = ConstellationControlMsg::UpdatePipelineId( parent_pipeline_id, change.browsing_context_id, change.top_level_browsing_context_id, pipeline_id, UpdatePipelineIdReason::Navigation, ); let _ = parent_pipeline.event_loop.send(msg); } } self.change_session_history(change); } } /// Called when the window is resized. fn handle_window_size_msg( &mut self, top_level_browsing_context_id: Option<TopLevelBrowsingContextId>, new_size: WindowSizeData, size_type: WindowSizeType, ) { debug!( "handle_window_size_msg: {:?}", new_size.initial_viewport.to_untyped() ); if let Some(top_level_browsing_context_id) = top_level_browsing_context_id { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); self.resize_browsing_context(new_size, size_type, browsing_context_id); } if let Some(resize_channel) = self.webdriver.resize_channel.take() { let _ = resize_channel.send(new_size); } self.window_size = new_size; } /// Called when the window exits from fullscreen mode fn handle_exit_fullscreen_msg( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, ) { let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); self.switch_fullscreen_mode(browsing_context_id); } /// Handle updating actual viewport / zoom due to @viewport rules fn handle_viewport_constrained_msg( &mut self, pipeline_id: PipelineId, constraints: ViewportConstraints, ) { self.compositor_proxy .send(ToCompositorMsg::ViewportConstrained( pipeline_id, constraints, )); } /// Checks the state of all script and layout pipelines to see if they are idle /// and compares the current layout state to what the compositor has. This is used /// to check if the output image is "stable" and can be written as a screenshot /// for reftests. /// Since this function is only used in reftests, we do not harden it against panic. fn handle_is_ready_to_save_image( &mut self, pipeline_states: HashMap<PipelineId, Epoch>, ) -> ReadyToSave { // Note that this function can panic, due to ipc-channel creation // failure. Avoiding this panic would require a mechanism for dealing // with low-resource scenarios. // // If there is no focus browsing context yet, the initial page has // not loaded, so there is nothing to save yet. let top_level_browsing_context_id = match self.active_browser_id { Some(id) => id, None => return ReadyToSave::NoTopLevelBrowsingContext, }; // If there are pending loads, wait for those to complete. 
if !self.pending_changes.is_empty() { return ReadyToSave::PendingChanges; } let (state_sender, state_receiver) = ipc::channel().expect("Failed to create IPC channel!"); let (epoch_sender, epoch_receiver) = ipc::channel().expect("Failed to create IPC channel!"); // Step through the fully active browsing contexts, checking that the script // thread is idle, and that the current epoch of the layout thread // matches what the compositor has painted. If all these conditions // are met, then the output image should not change and a reftest // screenshot can safely be written. for browsing_context in self.fully_active_browsing_contexts_iter(top_level_browsing_context_id) { let pipeline_id = browsing_context.pipeline_id; debug!( "Checking readiness of browsing context {}, pipeline {}.", browsing_context.id, pipeline_id ); let pipeline = match self.pipelines.get(&pipeline_id) { None => { warn!("Pipeline {} screenshot while closing.", pipeline_id); continue; }, Some(pipeline) => pipeline, }; // Check to see if there are any webfonts still loading. // // If GetWebFontLoadState returns false, either there are no // webfonts loading, or there's a WebFontLoaded message waiting in // script_chan's message queue. Therefore, we need to check this // before we check whether the document is ready; otherwise, // there's a race condition where a webfont has finished loading, // but hasn't yet notified the document. let msg = LayoutControlMsg::GetWebFontLoadState(state_sender.clone()); if let Err(e) = pipeline.layout_chan.send(msg) { warn!("Get web font failed ({})", e); } if state_receiver.recv().unwrap_or(true) { return ReadyToSave::WebFontNotLoaded; } // See if this pipeline has reached idle script state yet. match self.document_states.get(&browsing_context.pipeline_id) { Some(&DocumentState::Idle) => {}, Some(&DocumentState::Pending) | None => { return ReadyToSave::DocumentLoading; }, } // Check the visible rectangle for this pipeline. If the constellation has received a // size for the pipeline, then its painting should be up to date. // // If the rectangle for this pipeline is zero sized, it will // never be painted. In this case, don't query the layout // thread as it won't contribute to the final output image. if browsing_context.size == Size2D::zero() { continue; } // Get the epoch that the compositor has drawn for this pipeline. let compositor_epoch = pipeline_states.get(&browsing_context.pipeline_id); match compositor_epoch { Some(compositor_epoch) => { // Synchronously query the layout thread to see if the current // epoch matches what the compositor has drawn. If they match // (and script is idle) then this pipeline won't change again // and can be considered stable. let message = LayoutControlMsg::GetCurrentEpoch(epoch_sender.clone()); if let Err(e) = pipeline.layout_chan.send(message) { warn!("Failed to send GetCurrentEpoch ({}).", e); } match epoch_receiver.recv() { Err(e) => warn!("Failed to receive current epoch ({}).", e), Ok(layout_thread_epoch) => { if layout_thread_epoch != *compositor_epoch { return ReadyToSave::EpochMismatch; } }, } }, None => { // The compositor doesn't know about this pipeline yet. // Assume it hasn't rendered yet. return ReadyToSave::PipelineUnknown; }, } } // All script threads are idle and layout epochs match compositor, so output image! ReadyToSave::Ready } /// Get the current activity of a pipeline. 
fn get_activity(&self, pipeline_id: PipelineId) -> DocumentActivity { let mut ancestor_id = pipeline_id; loop { if let Some(ancestor) = self.pipelines.get(&ancestor_id) { if let Some(browsing_context) = self.browsing_contexts.get(&ancestor.browsing_context_id) { if browsing_context.pipeline_id == ancestor_id { if let Some(parent_pipeline_id) = browsing_context.parent_pipeline_id { ancestor_id = parent_pipeline_id; continue; } else { return DocumentActivity::FullyActive; } } } } if pipeline_id == ancestor_id { return DocumentActivity::Inactive; } else { return DocumentActivity::Active; } } } /// Set the current activity of a pipeline. fn set_activity(&self, pipeline_id: PipelineId, activity: DocumentActivity) { debug!("Setting activity of {} to be {:?}.", pipeline_id, activity); if let Some(pipeline) = self.pipelines.get(&pipeline_id) { pipeline.set_activity(activity); let child_activity = if activity == DocumentActivity::Inactive { DocumentActivity::Active } else { activity }; for child_id in &pipeline.children { if let Some(child) = self.browsing_contexts.get(child_id) { self.set_activity(child.pipeline_id, child_activity); } } } } /// Update the current activity of a pipeline. fn update_activity(&self, pipeline_id: PipelineId) { self.set_activity(pipeline_id, self.get_activity(pipeline_id)); } /// Handle updating the size of a browsing context. /// This notifies every pipeline in the context of the new size. fn resize_browsing_context( &mut self, new_size: WindowSizeData, size_type: WindowSizeType, browsing_context_id: BrowsingContextId, ) { if let Some(browsing_context) = self.browsing_contexts.get_mut(&browsing_context_id) { browsing_context.size = new_size.initial_viewport; // Send Resize (or ResizeInactive) messages to each pipeline in the frame tree. let pipeline_id = browsing_context.pipeline_id; let pipeline = match self.pipelines.get(&pipeline_id) { None => return warn!("Pipeline {:?} resized after closing.", pipeline_id), Some(pipeline) => pipeline, }; let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize( pipeline.id, new_size, size_type, )); let pipeline_ids = browsing_context .pipelines .iter() .filter(|pipeline_id| **pipeline_id != pipeline.id); for id in pipeline_ids { if let Some(pipeline) = self.pipelines.get(&id) { let _ = pipeline .event_loop .send(ConstellationControlMsg::ResizeInactive( pipeline.id, new_size, )); } } } // Send resize message to any pending pipelines that aren't loaded yet. 
for change in &self.pending_changes { let pipeline_id = change.new_pipeline_id; let pipeline = match self.pipelines.get(&pipeline_id) { None => { warn!("Pending pipeline {:?} is closed", pipeline_id); continue; }, Some(pipeline) => pipeline, }; if pipeline.browsing_context_id == browsing_context_id { let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize( pipeline.id, new_size, size_type, )); } } } // Handle switching from fullscreen mode fn switch_fullscreen_mode(&mut self, browsing_context_id: BrowsingContextId) { if let Some(browsing_context) = self.browsing_contexts.get(&browsing_context_id) { let pipeline_id = browsing_context.pipeline_id; let pipeline = match self.pipelines.get(&pipeline_id) { None => { return warn!( "Pipeline {:?} switched from fullscreen mode after closing.", pipeline_id ) }, Some(pipeline) => pipeline, }; let _ = pipeline .event_loop .send(ConstellationControlMsg::ExitFullScreen(pipeline.id)); } } // Close a browsing context (and all children) fn close_browsing_context( &mut self, browsing_context_id: BrowsingContextId, exit_mode: ExitPipelineMode, ) { debug!("Closing browsing context {}.", browsing_context_id); self.close_browsing_context_children( browsing_context_id, DiscardBrowsingContext::Yes, exit_mode, ); let browsing_context = match self.browsing_contexts.remove(&browsing_context_id) { Some(ctx) => ctx, None => return warn!("Closing browsing context {:?} twice.", browsing_context_id), }; { let session_history = self.get_joint_session_history(browsing_context.top_level_id); session_history.remove_entries_for_browsing_context(browsing_context_id); } if let Some(parent_pipeline_id) = browsing_context.parent_pipeline_id { match self.pipelines.get_mut(&parent_pipeline_id) { None => { return warn!( "Pipeline {:?} child closed after parent.", parent_pipeline_id ); }, Some(parent_pipeline) => parent_pipeline.remove_child(browsing_context_id), }; } debug!("Closed browsing context {:?}.", browsing_context_id); } // Close the children of a browsing context fn close_browsing_context_children( &mut self, browsing_context_id: BrowsingContextId, dbc: DiscardBrowsingContext, exit_mode: ExitPipelineMode, ) { debug!("Closing browsing context children {}.", browsing_context_id); // Store information about the pipelines to be closed. Then close the // pipelines, before removing ourself from the browsing_contexts hash map. This // ordering is vital - so that if close_pipeline() ends up closing // any child browsing contexts, they can be removed from the parent browsing context correctly. let mut pipelines_to_close: Vec<PipelineId> = self .pending_changes .iter() .filter(|change| change.browsing_context_id == browsing_context_id) .map(|change| change.new_pipeline_id) .collect(); if let Some(browsing_context) = self.browsing_contexts.get(&browsing_context_id) { pipelines_to_close.extend(&browsing_context.pipelines) } for pipeline_id in pipelines_to_close { self.close_pipeline(pipeline_id, dbc, exit_mode); } debug!("Closed browsing context children {}.", browsing_context_id); } // Discard the pipeline for a given document, udpdate the joint session history. 
fn handle_discard_document( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, pipeline_id: PipelineId, ) { match self.browsers.get_mut(&top_level_browsing_context_id) { Some(browser) => { let load_data = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline.load_data.clone(), None => return warn!("Discarding closed pipeline {}", pipeline_id), }; browser.session_history.replace_reloader( NeedsToReload::No(pipeline_id), NeedsToReload::Yes(pipeline_id, load_data), ); }, None => { return warn!( "Discarding pipeline {} after browser {} closure", pipeline_id, top_level_browsing_context_id, ); }, }; self.close_pipeline( pipeline_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal, ); } // Send a message to script requesting the document associated with this pipeline runs the 'unload' algorithm. fn unload_document(&self, pipeline_id: PipelineId) { if let Some(pipeline) = self.pipelines.get(&pipeline_id) { let msg = ConstellationControlMsg::UnloadDocument(pipeline_id); let _ = pipeline.event_loop.send(msg); } } // Close all pipelines at and beneath a given browsing context fn close_pipeline( &mut self, pipeline_id: PipelineId, dbc: DiscardBrowsingContext, exit_mode: ExitPipelineMode, ) { debug!("Closing pipeline {:?}.", pipeline_id); // Sever connection to browsing context let browsing_context_id = self .pipelines .get(&pipeline_id) .map(|pipeline| pipeline.browsing_context_id); if let Some(browsing_context) = browsing_context_id .and_then(|browsing_context_id| self.browsing_contexts.get_mut(&browsing_context_id)) { browsing_context.pipelines.remove(&pipeline_id); } // Store information about the browsing contexts to be closed. Then close the // browsing contexts, before removing ourself from the pipelines hash map. This // ordering is vital - so that if close_browsing_context() ends up closing // any child pipelines, they can be removed from the parent pipeline correctly. let browsing_contexts_to_close = { let mut browsing_contexts_to_close = vec![]; if let Some(pipeline) = self.pipelines.get(&pipeline_id) { browsing_contexts_to_close.extend_from_slice(&pipeline.children); } browsing_contexts_to_close }; // Remove any child browsing contexts for child_browsing_context in &browsing_contexts_to_close { self.close_browsing_context(*child_browsing_context, exit_mode); } // Note, we don't remove the pipeline now, we wait for the message to come back from // the pipeline. let pipeline = match self.pipelines.get(&pipeline_id) { Some(pipeline) => pipeline, None => return warn!("Closing pipeline {:?} twice.", pipeline_id), }; // Remove this pipeline from pending changes if it hasn't loaded yet. let pending_index = self .pending_changes .iter() .position(|change| change.new_pipeline_id == pipeline_id); if let Some(pending_index) = pending_index { self.pending_changes.remove(pending_index); } // Inform script, compositor that this pipeline has exited. match exit_mode { ExitPipelineMode::Normal => pipeline.exit(dbc), ExitPipelineMode::Force => pipeline.force_exit(dbc), } debug!("Closed pipeline {:?}.", pipeline_id); } // Randomly close a pipeline -if --random-pipeline-closure-probability is set fn maybe_close_random_pipeline(&mut self) { match self.random_pipeline_closure { Some((ref mut rng, probability)) => { if probability <= rng.gen::<f32>() { return; } }, _ => return, }; // In order to get repeatability, we sort the pipeline ids. 
let mut pipeline_ids: Vec<&PipelineId> = self.pipelines.keys().collect(); pipeline_ids.sort(); if let Some((ref mut rng, probability)) = self.random_pipeline_closure { if let Some(pipeline_id) = pipeline_ids.choose(rng) { if let Some(pipeline) = self.pipelines.get(pipeline_id) { if self .pending_changes .iter() .any(|change| change.new_pipeline_id == pipeline.id) && probability <= rng.gen::<f32>() { // We tend not to close pending pipelines, as that almost always // results in pipelines being closed early in their lifecycle, // and not stressing the constellation as much. // https://github.com/servo/servo/issues/18852 info!("Not closing pending pipeline {}.", pipeline_id); } else { // Note that we deliberately do not do any of the tidying up // associated with closing a pipeline. The constellation should cope! warn!("Randomly closing pipeline {}.", pipeline_id); pipeline.force_exit(DiscardBrowsingContext::No); } } } } } fn get_joint_session_history( &mut self, top_level_id: TopLevelBrowsingContextId, ) -> &mut JointSessionHistory { &mut self .browsers .entry(top_level_id) // This shouldn't be necessary since `get_joint_session_history` is // invoked for existing browsers but we need this to satisfy the // type system. .or_insert_with(|| Browser { focused_browsing_context_id: BrowsingContextId::from(top_level_id), session_history: JointSessionHistory::new(), }) .session_history } // Convert a browsing context to a sendable form to pass to the compositor fn browsing_context_to_sendable( &self, browsing_context_id: BrowsingContextId, ) -> Option<SendableFrameTree> { self.browsing_contexts .get(&browsing_context_id) .and_then(|browsing_context| { self.pipelines .get(&browsing_context.pipeline_id) .map(|pipeline| { let mut frame_tree = SendableFrameTree { pipeline: pipeline.to_sendable(), children: vec![], }; for child_browsing_context_id in &pipeline.children { if let Some(child) = self.browsing_context_to_sendable(*child_browsing_context_id) { frame_tree.children.push(child); } } frame_tree }) }) } /// Re-send the frame tree to the compositor. fn update_frame_tree_if_active( &mut self, top_level_browsing_context_id: TopLevelBrowsingContextId, ) { // Only send the frame tree if it's the active one or if no frame tree // has been sent yet. if self.active_browser_id.is_none() || Some(top_level_browsing_context_id) == self.active_browser_id { self.send_frame_tree(top_level_browsing_context_id); } } /// Send the current frame tree to compositor fn send_frame_tree(&mut self, top_level_browsing_context_id: TopLevelBrowsingContextId) { // Note that this function can panic, due to ipc-channel creation failure. // avoiding this panic would require a mechanism for dealing // with low-resource scenarios. let browsing_context_id = BrowsingContextId::from(top_level_browsing_context_id); if let Some(frame_tree) = self.browsing_context_to_sendable(browsing_context_id) { debug!( "Sending frame tree for browsing context {}.", browsing_context_id ); self.active_browser_id = Some(top_level_browsing_context_id); self.compositor_proxy .send(ToCompositorMsg::SetFrameTree(frame_tree)); } } }<|fim▁end|>
if let Err(e) = sender.send(CanvasMsg::Create( canvas_id_sender,
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Created on Wed Dec 14 14:10:41 2016 @author: sigurdja """<|fim▁hole|> version="0.1", packages=find_packages(), )<|fim▁end|>
from setuptools import setup, find_packages setup( name="psse_models",
<|file_name|>reloj3.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3 # -*- coding: utf-8 -*- # Una copia del programa que hice en python 2 para tomar el tiempo en # una partida de ajedrez pero este es para python 3. from tkinter import * from tkinter import font import datetime class RelojAjedrez(Frame): def __init__(self, master = None): Frame.__init__(self, master) self.grid(sticky = N + S + E + W) self.crearControles() def crearControles(self): top = self.winfo_toplevel() top.rowconfigure(0, weight = 1) top.columnconfigure(0, weight = 1) self.rowconfigure(2, weight = 1) self.columnconfigure(0, weight = 1) self.columnconfigure(1, weight = 1) letrasDisp = font.Font(family = "Courier", size = 20, weight = "bold") self.lbTiNegras = Label(self, text = "00:00:00", font = letrasDisp, bg = "#000", fg = "#FFF") self.lbTiNegras.grid(row = 0, column = 0) self.lbTiBlancas = Label(self, text = "00:00:00", font = letrasDisp, bg = "#FFF", fg = "#000") self.lbTiBlancas.grid(row = 0, column = 1) self.bGNegras = Button(self, text = "Negras", command = self.bGNegrasClick) self.bGNegras.grid(row = 1, column = 0) self.bGBlancas = Button(self, text = "Blancas", command = self.bGBlancasClick) self.bGBlancas.grid(row = 1, column = 1) self.bind_all("<space>", self.teclaPres) self.bind_all("<Shift_L>", self.teclaPres) self.bind_all("<Shift_R>", self.teclaPres) self.bGBlancas.grid() self.iniciarCont() self.mostrarReloj() def iniciarCont(self): # cantidad de minutos cantmin = 5 # la variable para llevar el reloj Negro self.rNegras = datetime.timedelta(minutes = cantmin) # la variable para llevar el reloj Blanco self.rBlancas = datetime.timedelta(minutes = cantmin) # variable para guardar cuanto tiempo había en un reloj al # comienzo de la cuenta self.dComienzo = datetime.timedelta(minutes = cantmin) # variable para tomar tiempo en cada tick y compararlo con el # que se toma al inicio del intervalo en referencia self.tMuestra = datetime.datetime.min # Tiempo que se toma al principio del intervalo para referencia self.tReferencia = datetime.datetime.min # si es true el reloj referenciado es el blanco, sino el negro self.refBlancas = False # una variable entera para guardar el id que devuelve el after # en caso de que se necesite cancelarlo self.idTempo = None # indica que el reloj está iniciado y contando self.contando = False def relojTick(self): self.tMuestra = datetime.datetime.now() # si el reloj blanco está seleccionado if (self.refBlancas): # que descuente tiempo del reloj blanco self.rBlancas = self.dComienzo - ( self.tMuestra - self.tReferencia) if self.rBlancas <= datetime.timedelta(0): self.rBlancas = datetime.timedelta(0) self.idTempo = None self.contando = False else: self.idTempo = self.after(100, self.relojTick) else: # si no que descuente del reloj negro self.rNegras = self.dComienzo - ( self.tMuestra - self.tReferencia) if self.rNegras <= datetime.timedelta(0):<|fim▁hole|> self.idTempo = None self.contando = False else: self.idTempo = self.after(100, self.relojTick) self.mostrarReloj() def mostrarReloj(self): self.lbTiNegras.config(text = "%02d:%02d:%02d" % ( self.rNegras.seconds / 60, self.rNegras.seconds % 60, self.rNegras.microseconds / 10000)) self.lbTiBlancas.config(text = "%02d:%02d:%02d" % ( self.rBlancas.seconds / 60, self.rBlancas.seconds % 60, self.rBlancas.microseconds / 10000)) def detenerBlancas(self, detenerRBlanco): # si alguno de los relojes no tiene tiempo if ((self.rBlancas <= datetime.timedelta(0)) or (self.rBlancas <= datetime.timedelta(0))): self.iniciarCont() # si me piden 
detener el reloj blanco if detenerRBlanco and (not self.contando or (self.contando and self.refBlancas)): # si hay un reloj activado que lo detenga if self.idTempo != None: self.after_cancel(self.idTempo) # que detenga el reloj blanco self.rBlancas = self.dComienzo - ( self.tMuestra - self.tReferencia) # y que inicie el reloj negro self.dComienzo = self.rNegras self.tMuestra = self.tReferencia = datetime.datetime.now() self.refBlancas = False # si me piden detener el reloj negro elif (not detenerRBlanco and (not self.contando or (self.contando and not self.refBlancas))): # si hay un reloj activado que lo detenga if self.idTempo != None: self.after_cancel(self.idTempo) # que detenga el reloj negro self.rNegro = self.dComienzo - ( self.tMuestra - self.tReferencia) # y que inicie el reloj blanco self.dComienzo = self.rBlancas self.tMuestra = self.tReferencia = datetime.datetime.now() self.refBlancas = True else: return # ahora estamos contando self.contando = True # hay que poner a correr el reloj self.idTempo = self.after(100, self.relojTick) def bGNegrasClick(self): self.detenerBlancas(False) def bGBlancasClick(self): self.detenerBlancas(True) def teclaPres(self, event): if (event.keysym == "space"): self.detenerBlancas(self.refBlancas) elif (event.keysym == "Shift_L"): self.detenerBlancas(False) elif (event.keysym == "Shift_R"): self.detenerBlancas(True) app = RelojAjedrez() app.master.title("Reloj de Ajedrez") app.mainloop()<|fim▁end|>
self.rNegras = datetime.timedelta(0)
<|file_name|>ApplicationTest.java<|end_file_name|><|fim▁begin|>package com.example.apahlavan1.top10downloader; import android.app.Application; import android.test.ApplicationTestCase; /** * <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a> */ public class ApplicationTest extends ApplicationTestCase<Application> {<|fim▁hole|> super(Application.class); } }<|fim▁end|>
public ApplicationTest() {
<|file_name|>page-api.cpp<|end_file_name|><|fim▁begin|>#include "models/page-api.h" // #include <QtConcurrentRun> #include <QTimer> #include <QtMath> #include <utility> #include "functions.h" #include "image.h" #include "logger.h" #include "models/api/api.h" #include "models/filtering/post-filter.h" #include "models/page.h" #include "models/search-query/search-query.h" #include "models/site.h" #include "network/network-reply.h" #include "tags/tag.h" PageApi::PageApi(Page *parentPage, Profile *profile, Site *site, Api *api, SearchQuery query, int page, int limit, PostFilter postFiltering, bool smart, QObject *parent, int pool, int lastPage, qulonglong lastPageMinId, qulonglong lastPageMaxId, QString lastPageMinDate, QString lastPageMaxDate) : QObject(parent), m_parentPage(parentPage), m_profile(profile), m_site(site), m_api(api), m_query(std::move(query)), m_errors(QStringList()), m_postFiltering(std::move(postFiltering)), m_imagesPerPage(limit), m_lastPage(lastPage), m_lastPageMinId(lastPageMinId), m_lastPageMaxId(lastPageMaxId), m_lastPageMinDate(std::move(lastPageMinDate)), m_lastPageMaxDate(std::move(lastPageMaxDate)), m_smart(smart), m_reply(nullptr) { m_imagesCount = -1; m_maxImagesCount = -1; m_pagesCount = -1; m_imagesCountSafe = false; m_pagesCountSafe = false; m_page = page; m_pool = pool; m_format = m_api->getName(); updateUrls(); } void PageApi::setLastPage(Page *page) { if (!page->isValid()) { return; } m_lastPage = page->page(); m_lastPageMaxId = page->maxId(); m_lastPageMinId = page->minId(); m_lastPageMaxDate = page->maxDate(); m_lastPageMinDate = page->minDate(); if (!page->nextPage().isEmpty() && page->page() == m_page - 1) { m_url = page->nextPage(); } else if (!page->prevPage().isEmpty() && page->page() == m_page + 1) { m_url = page->prevPage(); } updateUrls(); } void PageApi::updateUrls() { QString url; m_errors.clear(); // URL searches if (m_query.urls.contains(m_api->getName())) { url = m_query.urls[m_api->getName()]; } else if (m_query.tags.count() == 1 && isUrl(m_query.tags.first())) { url = m_query.tags.first(); } else if (!m_url.isEmpty()) { url = m_url.toString(); } else { PageUrl ret; if (!m_query.gallery.isNull()) { ret = m_api->galleryUrl(m_query.gallery, m_page, m_imagesPerPage, m_site); } else { LastPageInformation lastPage; lastPage.page = m_lastPage; lastPage.minId = m_lastPageMinId; lastPage.minDate = m_lastPageMinDate; lastPage.maxId = m_lastPageMaxId; lastPage.maxDate = m_lastPageMaxDate; ret = m_api->pageUrl(m_query.tags.join(' '), m_page, m_imagesPerPage, lastPage, m_site); } if (!ret.error.isEmpty()) { m_errors.append(ret.error); } url = ret.url; m_headers = ret.headers; } // Add site information to URL url = m_site->fixUrl(url).toString();<|fim▁hole|>} void PageApi::setReply(NetworkReply *reply) { if (m_reply != nullptr) { if (m_reply->isRunning()) { m_reply->abort(); } m_reply->deleteLater(); } m_reply = reply; } void PageApi::load(bool rateLimit, bool force) { if (m_loading) { if (!force) { return; } setReply(nullptr); } if (m_url.isEmpty() && !m_errors.isEmpty()) { for (const QString &err : qAsConst(m_errors)) { log(QStringLiteral("[%1][%2] %3").arg(m_site->url(), m_format, err), Logger::Warning); } emit finishedLoading(this, LoadResult::Error); return; } // Reading reply and resetting vars m_images.clear(); m_tags.clear(); m_loaded = false; m_loading = true; m_pageImageCount = 0; m_filteredImageCount = 0; m_imagesCount = -1; m_maxImagesCount = -1; m_pagesCount = -1; log(QStringLiteral("[%1][%2] Loading page `%3`").arg(m_site->url(), m_format, 
m_url.toString().toHtmlEscaped()), Logger::Info); Site::QueryType type = rateLimit ? Site::QueryType::Retry : Site::QueryType::List; setReply(m_site->get(m_url, type, QUrl(), "", nullptr, m_headers)); connect(m_reply, &NetworkReply::finished, this, &PageApi::parse); } void PageApi::abort() { if (m_reply != nullptr && m_reply->isRunning()) { m_reply->abort(); } } bool PageApi::addImage(const QSharedPointer<Image> &img) { if (img.isNull()) { return false; } m_pageImageCount++; QStringList filters = m_postFiltering.match(img->tokens(m_profile)); if (!filters.isEmpty()) { m_filteredImageCount++; img->deleteLater(); log(QStringLiteral("[%1][%2] Image filtered. Reason: %3.").arg(m_site->url(), m_format, filters.join(", ")), Logger::Info); return false; } m_images.append(img); return true; } void PageApi::parse() { if (m_reply == nullptr) { return; } log(QStringLiteral("[%1][%2] Receiving page `%3`").arg(m_site->url(), m_format, m_reply->url().toString().toHtmlEscaped()), Logger::Info); // Check redirection QUrl redir = m_reply->attribute(QNetworkRequest::RedirectionTargetAttribute).toUrl(); if (!redir.isEmpty()) { QUrl newUrl = m_site->fixUrl(redir.toString(), m_url); log(QStringLiteral("[%1][%2] Redirecting page `%3` to `%4`").arg(m_site->url(), m_format, m_url.toString().toHtmlEscaped(), newUrl.toString().toHtmlEscaped()), Logger::Info); // HTTP -> HTTPS redirects const bool ssl = m_site->setting("ssl", false).toBool(); if (!ssl && newUrl.path() == m_url.path() && newUrl.scheme() == "https" && m_url.scheme() == "http") { const bool notThisSite = m_site->setting("ssl_never_correct", false).toBool(); if (!notThisSite) { emit httpsRedirect(); } } m_url = newUrl; load(false, true); return; } // Detect HTTP 429 / 509 usage limit reached const int statusCode = m_reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); if (statusCode == 429 || statusCode == 509) { log(QStringLiteral("[%1][%2] Limit reached (%3). New try.").arg(m_site->url(), m_format, QString::number(statusCode)), Logger::Warning); load(true, true); return; } // QtConcurrent::run(this, &PageApi::parseActual); parseActual(); } void PageApi::parseActual() { const bool isGallery = !m_query.gallery.isNull(); const bool parseErrors = isGallery ? 
m_api->parseGalleryErrors() : m_api->parsePageErrors(); const int statusCode = m_reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); const int offset = (m_page - 1) * m_imagesPerPage; // Detect Cloudflare if ((statusCode == 403 || statusCode == 429 || statusCode == 503) && m_reply->rawHeader("server") == "cloudflare") { m_errors.append("Cloudflare wall"); log(QStringLiteral("[%1][%2] Cloudflare wall for '%3'").arg(m_site->url(), m_format, m_reply->url().toString()), Logger::Error); setReply(nullptr); m_loaded = true; m_loading = false; emit finishedLoading(this, LoadResult::Error); return; } // Try to read the reply m_source = m_reply->readAll(); if (m_source.isEmpty() || (m_reply->error() != NetworkReply::NetworkError::NoError && !parseErrors)) { if (m_reply->error() != NetworkReply::NetworkError::OperationCanceledError) { log(QStringLiteral("[%1][%2] Loading error: %3 (%4)").arg(m_site->url(), m_format, m_reply->errorString()).arg(m_reply->error()), Logger::Error); } setReply(nullptr); m_loaded = true; m_loading = false; emit finishedLoading(this, LoadResult::Error); return; } // Parse source ParsedPage page; if (isGallery) { page = m_api->parseGallery(m_parentPage, m_source, statusCode, offset); } else { page = m_api->parsePage(m_parentPage, m_source, statusCode, offset); } // Handle errors if (!page.error.isEmpty()) { m_errors.append(page.error); log(QStringLiteral("[%1][%2] %3").arg(m_site->url(), m_format, page.error), Logger::Warning); setReply(nullptr); m_loaded = true; m_loading = false; emit finishedLoading(this, LoadResult::Error); return; } // Fill data from parsing result if (page.pageCount >= 0) { setPageCount(page.pageCount, true); } if (page.imageCount >= 0) { setImageCount(page.imageCount, true); } for (const Tag &tag : qAsConst(page.tags)) { m_tags.append(tag); } for (const QSharedPointer<Image> &img : qAsConst(page.images)) { addImage(img); } if (page.urlNextPage.isValid()) { m_urlNextPage = page.urlNextPage; } if (page.urlPrevPage.isValid()) { m_urlPrevPage = page.urlPrevPage; } if (!page.wiki.isEmpty()) { m_wiki = fixCloudflareEmails(page.wiki); } // Link images to their respective galleries if (isGallery) { for (auto &img : m_images) { img->setParentGallery(m_query.gallery); } } // Complete image count information from tag count information if (m_imagesCount < 1 || !m_imagesCountSafe) { int found = 0; int min = -1; for (const Tag &tag : qAsConst(m_tags)) { if (m_query.tags.contains(tag.text())) { found++; if (min == -1 || min > tag.count()) { min = tag.count(); } } } int searchTagsCount = m_query.tags.count();; if (m_query.tags.count() > found) { const QStringList modifiers = QStringList() << "-" << m_api->modifiers(); for (const QString &search : qAsConst(m_query.tags)) { for (const QString &modifier : modifiers) { if (search.startsWith(modifier)) { searchTagsCount--; break; } } } } if (searchTagsCount == found) { if (m_query.tags.count() == 1) { const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? 
forcedLimit : m_imagesPerPage; const int expectedPageCount = qCeil(static_cast<qreal>(min) / perPage); setImageCount(min, m_pagesCountSafe && expectedPageCount == m_pagesCount); } setImageMaxCount(min); } } // Complete missing tag information from images' tags if necessary if (m_tags.isEmpty()) { QStringList tagsGot; for (const QSharedPointer<Image> &img : qAsConst(m_images)) { for (const Tag &tag : img->tags()) { if (tagsGot.contains(tag.text())) { const int index = tagsGot.indexOf(tag.text()); m_tags[index].setCount(m_tags[index].count() + 1); } else { m_tags.append(tag); tagsGot.append(tag.text()); } } } } // Remove first n images (according to site settings) int skip = m_site->setting("ignore/always", 0).toInt(); if (false && m_isAltPage) { // FIXME(Bionus): broken since move to Api class skip = m_site->setting("ignore/alt", 0).toInt(); } if (m_page == 1) { skip = m_site->setting("ignore/1", 0).toInt(); } if (m_api->getName() == QLatin1String("Html")) { if (m_images.size() >= skip) { for (int i = 0; i < skip; ++i) { m_images.removeFirst(); m_pageImageCount--; } } else { log(QStringLiteral("Wanting to skip %1 images but only %2 returned").arg(skip).arg(m_images.size()), Logger::Warning); } } // Virtual paging int firstImage = 0; int lastImage = m_smart ? m_imagesPerPage : m_images.size(); if (false && !m_originalUrl.contains("{page}") && !m_originalUrl.contains("{cpage}") && !m_originalUrl.contains("{pagepart}") && !m_originalUrl.contains("{pid}")) { // TODO(Bionus): add real virtual paging firstImage = m_imagesPerPage * (m_page - 1); lastImage = m_imagesPerPage; } while (firstImage > 0 && !m_images.isEmpty()) { m_images.removeFirst(); firstImage--; } while (m_images.size() > lastImage) { m_images.removeLast(); } log(QStringLiteral("[%1][%2] Parsed page `%3`: %4 images (%5), %6 tags (%7), %8 total (%9), %10 pages (%11)").arg(m_site->url(), m_format, m_reply->url().toString().toHtmlEscaped()).arg(m_images.count()).arg(m_pageImageCount).arg(page.tags.count()).arg(m_tags.count()).arg(imagesCount(false)).arg(imagesCount(true)).arg(pagesCount(false)).arg(pagesCount(true)), Logger::Info); setReply(nullptr); m_loaded = true; m_loading = false; emit finishedLoading(this, LoadResult::Ok); } void PageApi::clear() { m_images.clear(); m_pageImageCount = 0; m_filteredImageCount = 0; } const QList<QSharedPointer<Image>> &PageApi::images() const { return m_images; } const QUrl &PageApi::url() const { return m_url; } const QString &PageApi::source() const { return m_source; } const QString &PageApi::wiki() const { return m_wiki; } const QList<Tag> &PageApi::tags() const { return m_tags; } const QStringList &PageApi::errors() const { return m_errors; } const QUrl &PageApi::nextPage() const { return m_urlNextPage; } const QUrl &PageApi::prevPage() const { return m_urlPrevPage; } bool PageApi::isLoaded() const { return m_loaded; } int PageApi::imagesPerPage() const { return m_imagesPerPage; } int PageApi::page() const { return m_page; } int PageApi::pageImageCount() const { return m_pageImageCount; } int PageApi::filteredImageCount() const { return m_filteredImageCount; } int PageApi::highLimit() const { return m_api->maxLimit(); } bool PageApi::hasNext() const { int pageCount = pagesCount(); int maxPages = maxPagesCount(); if (pageCount <= 0 && maxPages > 0) { pageCount = maxPages; } return pageCount > m_page || (pageCount <= 0 && m_pageImageCount > 0); } bool PageApi::isImageCountSure() const { return m_imagesCountSafe; } int PageApi::imagesCount(bool guess) const { if (m_imagesCountSafe) { return 
m_imagesCount; } if (m_pagesCount == 1) { return m_pageImageCount; } if (!guess) { return -1; } if (m_imagesCount < 0 && m_pagesCount >= 0) { const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? forcedLimit : m_imagesPerPage; return m_pagesCount * perPage; } return m_imagesCount; } int PageApi::maxImagesCount() const { return m_maxImagesCount; } bool PageApi::isPageCountSure() const { return m_pagesCountSafe; } int PageApi::pagesCount(bool guess) const { if (m_pagesCountSafe) { return m_pagesCount; } if (!guess) { return -1; } if (m_pagesCount < 0 && m_imagesCount >= 0) { const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? forcedLimit : m_imagesPerPage; return qCeil(static_cast<qreal>(m_imagesCount) / perPage); } return m_pagesCount; } int PageApi::maxPagesCount() const { if (m_maxImagesCount < 0) { return -1; } const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? forcedLimit : m_imagesPerPage; return qCeil(static_cast<qreal>(m_maxImagesCount) / perPage); } qulonglong PageApi::maxId() const { qulonglong maxId = 0; for (const QSharedPointer<Image> &img : m_images) { if (img->id() > maxId || maxId == 0) { maxId = img->id(); } } return maxId; } qulonglong PageApi::minId() const { qulonglong minId = 0; for (const QSharedPointer<Image> &img : m_images) { if (img->id() < minId || minId == 0) { minId = img->id(); } } return minId; } QString PageApi::maxDate() const { QString maxDate; for (const QSharedPointer<Image> &img : m_images) { if (img->dateRaw() > maxDate || maxDate.isEmpty()) { maxDate = img->dateRaw(); } } return maxDate; } QString PageApi::minDate() const { QString minDate; for (const QSharedPointer<Image> &img : m_images) { if (img->dateRaw() < minDate || minDate.isEmpty()) { minDate = img->dateRaw(); } } return minDate; } void PageApi::setImageCount(int count, bool sure) { if (m_imagesCount <= 0 || (!m_imagesCountSafe && sure)) { m_imagesCount = count; m_imagesCountSafe = sure; if (sure) { const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? forcedLimit : m_imagesPerPage; setPageCount(qCeil(static_cast<qreal>(count) / perPage), true); } } } void PageApi::setImageMaxCount(int maxCount) { m_maxImagesCount = maxCount; } void PageApi::setPageCount(int count, bool sure) { if (m_pagesCount <= 0 || (!m_pagesCountSafe && sure)) { m_pagesCount = count; m_pagesCountSafe = sure; if (sure) { const int forcedLimit = m_api->forcedLimit(); const int perPage = forcedLimit > 0 ? forcedLimit : m_imagesPerPage; setImageCount(count * perPage, false); } } }<|fim▁end|>
m_originalUrl = QString(url); m_url = QString(url);
<|file_name|>form-mapping.js<|end_file_name|><|fim▁begin|>var form = window.document.forms[0]; var inputsObject = {}; var rawParams = []; var mappedParams = []; var getInputValue = function(input) { var value = false; switch (input.type) { case 'radio': case 'checkbox': if (input.checked) { value = input.value; } break; case 'text': case 'hidden': default: value = input.value; break; } return value; }; var trim = function(s) { s = s.replace(/(^\s*)|(\s*$)/gi,"");<|fim▁hole|> s = s.replace(/[ ]{2,}/gi," "); s = s.replace(/\n /,"\n"); return s; }; var serialize = function(obj, prefix) { var str = []; for(var p in obj) { if (obj.hasOwnProperty(p)) { var k = prefix ? prefix + "[" + p + "]" : p, v = obj[p]; str.push(typeof v == "object" ? serialize(v, k) : encodeURIComponent(k) + "=" + encodeURIComponent(v)); } } return str.join("&"); } var ClosestLabel = function(element, tagname) { if(element.nodeName === 'FORM') { return null; } tagname = tagname.toLowerCase(); do { if(element.nodeName.toLowerCase() === tagname){ var LabelExists = element.getElementsByTagName("label").length>0; var labelText = (LabelExists ? element.getElementsByTagName("label")[0].innerText : false); return labelText; } } while(element = element.parentNode); return null; }; var Closest = function(element, tagname) { if(element.nodeName === 'FORM') { return null; } tagname = tagname.toLowerCase(); do { if(element.nodeName.toLowerCase() === tagname){ return element; } } while(element = element.parentNode); return null; }; /* make visible inputs first in loop */ var sortInputs = function(obj) { var visibleInputs = [], hiddenInputs = [], temp_obj = {}; for (var key in obj) { if (obj.hasOwnProperty(key)) { if(obj[key].type !== "hidden"){ visibleInputs.push(key); } else { hiddenInputs.push(key); } } } var merged = hiddenInputs.concat(visibleInputs.reverse()); // Merges both arrays for (var i = merged.length - 1; i >= 0; i--) { temp_obj[merged[i]] = obj[merged[i]]; }; return temp_obj; }; //var inbound_data = MapInput( this_input ); inbound_data = {}; no_match = []; var MapInput = function (inputObject) { var matchArray = [ "name", "first name", "last name", "email", "e-mail", "phone", "website", "job title", "your_favorite_food_", "company", "tele", "address", "comment"]; //var body = jQuery("body"); var input_id = inputObject.id || false; var input_name = inputObject.name || false; var this_val = inputObject['value']; var input = inputObject['input']; // Main Loop for (var i = matchArray.length - 1; i >= 0; i--) { var match = matchArray[i]; //console.log("Match name " + match); //console.log("Input name " + input_name); var lookingFor = trim(match); var nice_name = lookingFor.replace(" ",'_'); var in_object_already = nice_name in inbound_data; console.log('looking for: ' + lookingFor); if (input_name && input_name.toLowerCase().indexOf(lookingFor)>-1) { // Look for attr name match console.warn('match name: ' + lookingFor); if (!in_object_already) { inbound_data[nice_name] = this_val; } //inbound_data.push('match name: ' + lookingFor + ":" + this_val); } else if (input_id && input_id.toLowerCase().indexOf(lookingFor)>-1) { // look for id match console.warn("input labels found in form"); console.log('match id: ' + lookingFor); if (!in_object_already) { inbound_data[nice_name] = this_val; } //inbound_data.push('match id: ' + lookingFor + ":" + this_val); } else if (labelText = ClosestLabel(input, "li")) { console.warn("li labels found in form"); if (labelText.toLowerCase().indexOf(lookingFor)>-1) { console.log('match label text: ' + 
lookingFor); if (!in_object_already) { inbound_data[nice_name] = this_val; } } } else if (labelText = ClosestLabel(input, "div")) { console.warn("div labels found in form"); if (labelText.toLowerCase().indexOf(lookingFor)>-1) { console.log('match label text: ' + lookingFor); if (!in_object_already) { inbound_data[nice_name] = this_val; } } } else if (labelText = ClosestLabel(input, "p")) { console.warn("P labels found in form"); if (labelText.toLowerCase().indexOf(lookingFor)>-1) { console.log('match label text: ' + lookingFor); if (!in_object_already) { inbound_data[nice_name] = this_val; } } } else { console.warn("No matches"); console.log('Need additional mapping data'); no_match.push(lookingFor + ":" + this_val); } } console.log(inbound_data); console.log(serialize(inbound_data)); console.log('no match here', no_match); return inbound_data; }; for (var i=0; i < form.elements.length; i++) { formInput = form.elements[i]; multiple = false; var parent = formInput.parentNode; var parent_parent = parent.parentNode; console.log("PARENT", parent); console.log("PARENT PARENT", parent_parent); if (formInput.name) { inputName = formInput.name.replace(/\[([^\[]*)\]/g, "_$1"); inputName = inputName.replace(/-/g, "_"); if (!inputsObject[inputName]) { inputsObject[inputName] = {}; } if (!inputsObject[inputName]['input']) { inputsObject[inputName]['input'] = formInput; }; switch (formInput.nodeName) { case 'INPUT': value = this.getInputValue(formInput); console.log(value); if (value === false) { continue; } break; case 'TEXTAREA': value = formInput.value; break; case 'SELECT': if (formInput.multiple) { values = []; multiple = true; for (var j = 0; j < formInput.length; j++) { if (formInput[j].selected) { values.push(encodeURIComponent(formInput[j].value)); } } } else { value = (formInput.value); } console.log('select val', value); break; } if (value) { if (formInput.type) { inputsObject[inputName]['type'] = formInput.type; } if (formInput.id) { inputsObject[inputName]['id'] = formInput.id; } if ('classList' in document.documentElement) { if (formInput.classList) { inputsObject[inputName]['class'] = formInput.classList; } } // inputsObject[inputName].push(multiple ? values.join(',') : encodeURIComponent(value)); if (!inputsObject[inputName]['value']) { inputsObject[inputName]['value'] = []; } if (!inputsObject[inputName]['name']) { inputsObject[inputName]['name'] = inputName; } inputsObject[inputName]['value'].push(multiple ? values.join(',') : encodeURIComponent(value)); } } } console.log(inputsObject); var inputsObject = sortInputs(inputsObject); var matchCommon = /name|first name|last name|email|e-mail|phone|website|job title|company|tele|address|comment/; for (var input in inputsObject) { console.log(input); var inputValue = inputsObject[input]['value']; //if (matchCommon.test(input) !== false) { console.log(input + " Matches Regex run mapping test"); var map = MapInput(inputsObject[input]); console.log("MAPP", map); //mappedParams.push( input + '=' + inputsObject[input]['value'].join(',') ); //} /* Add custom hook here to look for additional values */ if (typeof (inputValue) != "undefined" && inputValue != null && inputValue != "") { rawParams.push( input + '=' + inputsObject[input]['value'].join(',') ); } } var raw_params = rawParams.join('&'); console.log("Raw PARAMS", raw_params); /* Filter here for raw */ var mapped_params = JSON.stringify(map); console.log("Mapped PARAMS", mapped_params); /* Filter here for mapped */<|fim▁end|>
<|file_name|>UsersService.java<|end_file_name|><|fim▁begin|>package com.p.service; import java.util.Collection; import java.util.Optional; import java.util.Random; import java.util.UUID; import javax.annotation.Resource; import org.apache.log4j.Logger; import org.hibernate.SessionFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.security.core.Authentication; import org.springframework.security.core.context.SecurityContext; import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.crypto.password.PasswordEncoder; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Isolation; import org.springframework.transaction.annotation.Transactional; import org.springframework.util.Assert; import com.p.model.Notificacion; import com.p.model.Role; import com.p.model.User; import com.p.model.modelAux.RegisterUser; import com.p.model.repositories.UserRepository; @Service("usersService") @Transactional(isolation = Isolation.READ_UNCOMMITTED) public class UsersService { protected static Logger logger = Logger.getLogger("service"); @Resource(name = "sessionFactory") private SessionFactory sessionFactory; @Autowired private UserRepository repository; @Autowired private NotificacionService notificacionService; @Autowired private EmailManager emailManager; @Autowired private PasswordEncoder passwordEncoder; @Transactional /** * Borra un usuario según sea usuari de la web (su id empieza por 1) o usuario de llavero(su id empieza por 0) * * @param id * el id del usuario existente */ public void delete(Integer id) { Assert.notNull(id); Assert.isTrue(id > 0); repository.delete(id); } /** * Guarda o edita sengún si el ID esta o no relleno * * @param us */ @Transactional() public User save(User us) { gestionarAvatar(us); gestionarAltaUsuario(us); User usr = repository.save(us); return usr; } protected void gestionarAltaUsuario(User us) { if (us.getId() == null || us.getId().equals(0)) { gestionarNotificacionAltaUsuario(us); gestionarEmailAltaUsuario(us); } } protected void gestionarEmailAltaUsuario(User us) { emailManager.notify(us); } /** * @param us */ protected void gestionarNotificacionAltaUsuario(User us) { // Es nuevo usuario // Le enviamos un email y una notificacion Notificacion notificacion = notificacionService.create(); Optional<User> admin = repository.findAdministradores().stream() .findFirst(); Assert.isTrue(admin.isPresent()); User administrador = admin.get(); notificacion.setEmisor(administrador); notificacion.setReceptor(us); notificacion.setTitulo("Gracias por registrarte en Pachanga!"); notificacion .setContenido("¿Porque no completas tu perfil? 
Quedará mucho más mono :)"); notificacionService.save(notificacion); } /** * @param us */ protected void gestionarAvatar(User us) { if (us.getAvatar() == null) { Random rd = new Random(); us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]); } } @Transactional public User getByEmail(String login) { Assert.notNull(login); Assert.isTrue(login.length() > 0); return repository.findByEmail(login); } @Transactional public User findOne(Integer id) { Assert.notNull(id); Assert.isTrue(id > -1); return repository.findOne(id); } @Transactional public Collection<User> findAll() { return repository.findAll(); } @Transactional public Collection<User> findAllDifferent(String email) { return repository.findAllDifferent(email); } @Transactional(readOnly = true) /** * * @author David Romero Alcaide * @return */ public User getPrincipal() { User result; SecurityContext context; Authentication authentication; Object principal; // If the asserts in this method fail, then you're // likely to have your Tomcat's working directory // corrupt. Please, clear your browser's cache, stop // Tomcat, update your Maven's project configuration, // clean your project, clean Tomcat's working directory, // republish your project, and start it over. context = SecurityContextHolder.getContext(); Assert.notNull(context);<|fim▁hole|> Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User); result = getByEmail(((org.springframework.security.core.userdetails.User) principal) .getUsername()); Assert.notNull(result); Assert.isTrue(result.getId() != 0); return result; } public User map(RegisterUser user) { User usr = create(); usr.setEmail(user.getEmail()); usr.setPassword(user.getPassword()); return usr; } public User create() { User user = new User(); user.setFirstName(" "); user.setLastName(" "); user.setRole(Role.ROLE_USER); return user; } @Transactional public void regenerarPassword(User user) { String newPass = UUID.randomUUID().toString(); newPass = passwordEncoder.encode(newPass); user.setPassword(newPass); save(user); emailManager.notifyNewPassword(user,newPass); } @Transactional(isolation = Isolation.READ_UNCOMMITTED) public byte[] findImage(Integer id) { Assert.notNull(id); Assert.isTrue(id > 0); return repository.findImage(id); } @Transactional(readOnly = true) public Collection<? extends User> find(String texto) { return repository.findFullText(texto); } }<|fim▁end|>
authentication = context.getAuthentication(); Assert.notNull(authentication); principal = authentication.getPrincipal();
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .sql import * <|fim▁hole|><|fim▁end|>
__all__ = ['DBAdapter', 'get_db_adapter', 'async_atomic', 'async_atomic_func', 'get_db_settings']
<|file_name|>Opdracht1.py<|end_file_name|><|fim▁begin|>__author__ = 'SM' import sys import # from tkinter import * # #create new window # root = Tk() # root.title("Hello world app") # root.geometry('200x85') # # app = Frame() # app.grid() # lbl = Label(app, text = "Hello World!") # lbl.grid() # # bttn1 = Button(app, text = "Press") # bttn1.grid() # # root.mainloop() from qt import * a = QApplication(sys.argv) <|fim▁hole|> # Instantiate the button hellobutton = QPushButton("Say 'Hello world!'",None) # And connect the action "sayHello" to the event "button has been clicked" a.connect(hellobutton, SIGNAL("clicked()"), sayHello) # The rest is known already... a.setMainWidget(hellobutton) hellobutton.show() a.exec_loop()<|fim▁end|>
# Our function to call when the button is clicked def sayHello(): print("Hello world")
<|file_name|>emberTemplates.js<|end_file_name|><|fim▁begin|>module.exports = { options: { templateCompilerPath: 'bower_components/ember/ember-template-compiler.js', handlebarsPath: 'bower_components/handlebars/handlebars.js', preprocess: function (source) { return source.replace(/\s+/g, ' '); }, templateName: function (sourceFile) { /* These are how templates will be named based on their folder structure. <|fim▁hole|> modules/application/partials/[name].hbs ==> _[name] modules/[moduleName]/templates/[moduleName].hbs ==> [moduleName] modules/[moduleName]/templates/[name].hbs ==> [moduleName]/[name] modules/[moduleName]/partials/[name].hbs ==> [moduleName]/_[name] Additionally any template that is nested deeper will have that structure added as well. modules/[moduleName]/templates/[folder1]/[folder2]/[name] ==> [moduleName]/[folder1]/[folder2]/[name] */ var matches = sourceFile.match(new RegExp('(?:app/modules/(.*?)/|app/)(templates|partials)?/?(.*)')), moduleName = matches[1], isAppModule = (moduleName === 'application'), isPartial = (matches[2] === 'partials'), fileName = matches[3], prefix = (isPartial ? '_' : ''), templateName = ''; if (moduleName && !isAppModule) { if (fileName === moduleName) { templateName = moduleName; } else { templateName = moduleName + '/' + prefix + fileName; } } else { templateName = prefix + fileName; } console.log('Compiling ' + sourceFile.blue + ' to ' + templateName.green); return templateName; } }, compile: { files: { 'tmp/compiled-templates.js': ['templates/**/*.{hbs,handlebars}', 'app/**/*.{hbs,handlebars}'] } } };<|fim▁end|>
components/[name].hbs ==> components/[name] partials/[name].hbs ==> _[name] modules/application/templates/[name].hbs ==> [name]
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import logging from speaklater import make_lazy_string from quokka.modules.accounts.models import User logger = logging.getLogger() def lazy_str_setting(key, default=None): from flask import current_app return make_lazy_string( lambda: current_app.config.get(key, default) ) def get_current_user(): from flask.ext.security import current_user try: if not current_user.is_authenticated(): return None except RuntimeError: # Flask-Testing will fail pass try: return User.objects.get(id=current_user.id)<|fim▁hole|><|fim▁end|>
except Exception as e: logger.warning("No user found: %s" % e.message) return None
<|file_name|>adjust_event.js<|end_file_name|><|fim▁begin|>/** @file This file contains the functions to adjust an existing polygon. */ /** * Creates the adjusting event * @constructor * @param {string} dom_attach - The html element where the polygon lives * @param {array} x - The x coordinates for the polygon points * @param {array} y - The y coordinates for the polygon points * @param {string} obj_name - The name of the adjusted_polygon * @param {function} ExitFunction - the_function to execute once adjusting is done * @param {float} scale - Scaling factor for polygon points */ function AdjustEvent(dom_attach,x,y,obj_name,ExitFunction,scale, bounding_box_annot) { /****************** Private variables ************************/ // ID of DOM element to attach to: this.bounding_box = bounding_box_annot; this.dom_attach = dom_attach; this.scale_button_pressed = false; // Polygon: this.x = x; this.y = y; // Object name: this.obj_name = obj_name; // Function to call when event is finished: this.ExitFunction = ExitFunction; // Scaling factor for polygon points: this.scale = scale; // Boolean indicating whether a control point has been edited: this.editedControlPoints = false; // Boolean indicating whether a control point is being edited: this.isEditingControlPoint = false; // Boolean indicating whether a scaling point is being edited: this.isEditingScalingPoint = false; // Boolean indicating whether the center of mass of the polygon is being // adjusted: this.isMovingCenterOfMass = false; // Index into which control point has been selected: this.selectedControlPoint; // Index into which scaling point has been selected: this.selectedScalingPoint; // Location of center of mass: this.center_x; this.center_y; // Element ids of drawn control points: this.control_ids = null; this.scalepoints_ids = null; // Element id of drawn center point: this.center_id = null; // ID of drawn polygon: this.polygon_id; /****************** Public functions ************************/ /** This function starts the adjusting event: */ this.StartEvent = function() { console.log('LabelMe: Starting adjust event...'); // Draw polygon: this.polygon_id = this.DrawPolygon(this.dom_attach,this.x,this.y,this.obj_name,this.scale); select_anno.polygon_id = this.polygon_id; FillPolygon(this.polygon_id); oVP.ShowTemporalBar(); // Set mousedown action to stop adjust event when user clicks on canvas: $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousedown({obj: this},function(e) { return e.data.obj.StopAdjustEvent(); }); // Show control points: if (this.bounding_box){ this.ShowScalingPoints(); this.ShowCenterOfMass(); return; } this.ShowControlPoints(); // Show center of mass: this.ShowCenterOfMass(); $(window).keydown({obj: this}, function (e){ if (!e.data.obj.scale_button_pressed && e.keyCode == 17 && !e.data.obj.isEditingControlPoint){ e.data.obj.RemoveScalingPoints(); e.data.obj.RemoveControlPoints(); e.data.obj.RemoveCenterOfMass(); e.data.obj.ShowScalingPoints(); e.data.obj.scale_button_pressed = true; } }); $(window).keyup({obj: this}, function (e){ if (e.keyCode == 17 && !e.data.obj.isEditingControlPoint){ e.data.obj.scale_button_pressed = false; e.data.obj.RemoveScalingPoints(); e.data.obj.RemoveControlPoints(); e.data.obj.RemoveCenterOfMass(); e.data.obj.ShowControlPoints(); e.data.obj.ShowCenterOfMass(); } }); }; /** This function stops the adjusting event and calls the ExitFunction: */ this.StopAdjustEvent = function() { // Remove polygon: $('#'+this.polygon_id).remove(); // Remove key press action 
$(window).unbind("keydown"); $(window).unbind("keyup"); // Remove control points and center of mass point: this.RemoveControlPoints(); this.RemoveCenterOfMass(); this.RemoveScalingPoints(); console.log('LabelMe: Stopped adjust event.'); oVP.HideTemporalBar(); // Call exit function: this.ExitFunction(this.x,this.y,this.editedControlPoints); }; /** This function shows the scaling points for a polygon */ this.ShowScalingPoints = function (){ if(!this.scalepoints_ids) this.scalepoints_ids = new Array(); for (var i = 0; i < this.x.length; i++){ this.scalepoints_ids.push(DrawPoint(this.dom_attach,this.x[i],this.y[i],'r="5" fill="#0000ff" stroke="#ffffff" stroke-width="2.5"',this.scale)); } for (var i = 0; i < this.scalepoints_ids.length; i++) $('#'+this.scalepoints_ids[i]).mousedown({obj: this,point: i},function(e) { <|fim▁hole|> } /** This function removes the displayed scaling points for a polygon */ this.RemoveScalingPoints = function (){ if(this.scalepoints_ids) { for(var i = 0; i < this.scalepoints_ids.length; i++) $('#'+this.scalepoints_ids[i]).remove(); this.scalepoints_ids = null; } } /** This function shows the control points for a polygon */ this.ShowControlPoints = function() { if(!this.control_ids) this.control_ids = new Array(); for(var i = 0; i < this.x.length; i++) { // Draw control point: this.control_ids.push(DrawPoint(this.dom_attach,this.x[i],this.y[i],'r="5" fill="#00ff00" stroke="#ffffff" stroke-width="2.5"',this.scale)); // Set action: $('#'+this.control_ids[i]).mousedown({obj: this,point: i},function(e) { return e.data.obj.StartMoveControlPoint(e.data.point); }); } }; /** This function removes the displayed control points for a polygon */ this.RemoveControlPoints = function() { if(this.control_ids) { for(var i = 0; i < this.control_ids.length; i++) $('#'+this.control_ids[i]).remove(); this.control_ids = null; } }; /** This function shows the middle grab point for a polygon. */ this.ShowCenterOfMass = function() { var MarkerSize = 8; if(this.x.length==1) MarkerSize = 6; // Get center point for polygon: this.CenterOfMass(this.x,this.y); // Draw center point: this.center_id = DrawPoint(this.dom_attach,this.center_x,this.center_y,'r="' + MarkerSize + '" fill="red" stroke="#ffffff" stroke-width="' + MarkerSize/2 + '"',this.scale); // Set action: $('#'+this.center_id).mousedown({obj: this},function(e) { return e.data.obj.StartMoveCenterOfMass(); }); }; /** This function removes the middle grab point for a polygon */ this.RemoveCenterOfMass = function() { if(this.center_id) { $('#'+this.center_id).remove(); this.center_id = null; } }; /** This function is called when one scaling point is clicked * It prepares the polygon for scaling. 
* @param {int} i - the index of the scaling point being modified */ this.StartMoveScalingPoint = function(i) { if(!this.isEditingScalingPoint) { $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousemove({obj: this},function(e) { return e.data.obj.MoveScalingPoint(e.originalEvent, !e.data.obj.bounding_box); }); $('#body').mouseup({obj: this},function(e) { return e.data.obj.StopMoveScalingPoint(e.originalEvent); }); this.RemoveCenterOfMass(); this.selectedScalingPoint = i; this.isEditingScalingPoint = true; this.editedControlPoints = true; } }; /** This function is called when one scaling point is being moved * It computes the position of the scaling point in relation to the polygon's center of mass * and resizes the polygon accordingly * @param {event} event - Indicates a point is being moved and the index of such point */ this.MoveScalingPoint = function(event, proportion) { var x = GetEventPosX(event); var y = GetEventPosY(event); if(this.isEditingScalingPoint && (this.scale_button_pressed || this.bounding_box)) { var origx, origy, pointx, pointy, prx, pry; pointx = this.x[this.selectedScalingPoint]; pointy = this.y[this.selectedScalingPoint]; this.CenterOfMass(this.x,this.y); var sx = pointx - this.center_x; var sy = pointy - this.center_y; if (sx < 0) origx = Math.max.apply(Math, this.x); else origx = Math.min.apply(Math, this.x); if (sy < 0) origy = Math.max.apply(Math, this.y); else origy = Math.min.apply(Math, this.y); prx = (Math.round(x/this.scale)-origx)/(pointx-origx); pry = (Math.round(y/this.scale)-origy)/(pointy-origy); if (proportion) pry = prx; if (prx <= 0 || pry <= 0 ) return; for (var i = 0; i < this.x.length; i++){ // Set point: var dx = (this.x[i] - origx)*prx; var dy = (this.y[i] - origy)*pry; x = origx + dx; y = origy + dy; this.x[i] = Math.max(Math.min(x,main_media.width_orig),1); this.y[i] = Math.max(Math.min(y,main_media.height_orig),1); } // Remove polygon and redraw: console.log(this.polygon_id); $('#'+this.polygon_id).parent().remove(); $('#'+this.polygon_id).remove(); this.polygon_id = this.DrawPolygon(this.dom_attach,this.x,this.y,this.obj_name,this.scale); select_anno.polygon_id = this.polygon_id; // Adjust control points: this.RemoveScalingPoints(); this.ShowScalingPoints(); } }; /** This function is called when one scaling point stops being moved * It updates the xml with the new coordinates of the polygon. 
* @param {event} event - Indicates a point is being moved and the index of such point */ this.StopMoveScalingPoint = function(event) { console.log('Moving scaling point'); if(this.isEditingScalingPoint) { this.MoveScalingPoint(event, !this.bounding_box); FillPolygon(this.polygon_id); this.isEditingScalingPoint = false; if (video_mode) main_media.UpdateObjectPosition(select_anno, this.x, this.y); this.ShowCenterOfMass(); // Set action: $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousedown({obj: this},function(e) { return e.data.obj.StopAdjustEvent(); }); } }; /** This function is called when one control point is clicked * @param {int} i - the index of the control point being modified */ this.StartMoveControlPoint = function(i) { if(!this.isEditingControlPoint) { $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousemove({obj: this},function(e) { return e.data.obj.MoveControlPoint(e.originalEvent); }); $('#body').mouseup({obj: this},function(e) { return e.data.obj.StopMoveControlPoint(e.originalEvent); }); this.RemoveCenterOfMass(); this.selectedControlPoint = i; this.isEditingControlPoint = true; this.editedControlPoints = true; } }; /** This function is called when one control point is being moved * @param {event} event - Indicates a point is being moved and the index of such point */ this.MoveControlPoint = function(event) { if(this.isEditingControlPoint) { var x = GetEventPosX(event); var y = GetEventPosY(event); // Set point: this.x[this.selectedControlPoint] = Math.max(Math.min(Math.round(x/this.scale),main_media.width_orig),1); this.y[this.selectedControlPoint] = Math.max(Math.min(Math.round(y/this.scale),main_media.height_orig),1); this.originalx = this.x; this.originaly = this.y; // Remove polygon and redraw: $('#'+this.polygon_id).parent().remove(); $('#'+this.polygon_id).remove(); this.polygon_id = this.DrawPolygon(this.dom_attach,this.x,this.y,this.obj_name,this.scale); select_anno.polygon_id = this.polygon_id; // Adjust control points: this.RemoveControlPoints(); this.ShowControlPoints(); } }; /** This function is called when one control point stops being moved * It updates the xml with the new coordinates of the polygon. * @param {event} event - Indicates a point is being moved and the index of such point */ this.StopMoveControlPoint = function(event) { console.log('Moving control point'); if(this.isEditingControlPoint) { this.MoveControlPoint(event); FillPolygon(this.polygon_id); this.ShowCenterOfMass(); this.isEditingControlPoint = false; if (video_mode) main_media.UpdateObjectPosition(select_anno, this.x, this.y); // Set action: $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousedown({obj: this},function(e) { return e.data.obj.StopAdjustEvent(); }); } }; /** This function is called when the middle grab point is clicked * It prepares the polygon for moving. 
*/ this.StartMoveCenterOfMass = function() { if(!this.isMovingCenterOfMass) { $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousemove({obj: this},function(e) { return e.data.obj.MoveCenterOfMass(e.originalEvent); }); $('#body').mouseup({obj: this},function(e) { return e.data.obj.StopMoveCenterOfMass(e.originalEvent); }); this.RemoveScalingPoints(); this.RemoveControlPoints(); this.isMovingCenterOfMass = true; this.editedControlPoints = true; } }; /** This function is called when the middle grab point is being moved * @param {event} event - Indicates the middle grab point is moving * It modifies the control points to be consistent with the polygon shift */ this.MoveCenterOfMass = function(event) { if(this.isMovingCenterOfMass) { var x = GetEventPosX(event); var y = GetEventPosY(event); // Get displacement: var dx = Math.round(x/this.scale)-this.center_x; var dy = Math.round(y/this.scale)-this.center_y; // Adjust dx,dy to make sure we don't go outside of the image: for(var i = 0; i < this.x.length; i++) { dx = Math.max(this.x[i]+dx,1)-this.x[i]; dy = Math.max(this.y[i]+dy,1)-this.y[i]; dx = Math.min(this.x[i]+dx,main_media.width_orig)-this.x[i]; dy = Math.min(this.y[i]+dy,main_media.height_orig)-this.y[i]; } // Adjust polygon and center point: for(var i = 0; i < this.x.length; i++) { this.x[i] = Math.round(this.x[i]+dx); this.y[i] = Math.round(this.y[i]+dy); } this.center_x = Math.round(this.scale*(dx+this.center_x)); this.center_y = Math.round(this.scale*(dy+this.center_y)); // Remove polygon and redraw: $('#'+this.polygon_id).parent().remove(); $('#'+this.polygon_id).remove(); this.polygon_id = this.DrawPolygon(this.dom_attach,this.x,this.y,this.obj_name,this.scale); select_anno.polygon_id = this.polygon_id; // Redraw center of mass: this.RemoveCenterOfMass(); this.ShowCenterOfMass(); } }; /** This function is called when the middle grab point stops being moved * It updates the xml with the new coordinates of the polygon. 
* @param {event} event - Indicates the middle grab point is being moved and the index of such point */ this.StopMoveCenterOfMass = function(event) { if(this.isMovingCenterOfMass) { // Move to final position: this.MoveCenterOfMass(event); // Refresh control points: if (this.bounding_box){ this.RemoveScalingPoints(); this.RemoveCenterOfMass(); this.ShowScalingPoints(); this.ShowCenterOfMass(); } else { this.RemoveControlPoints(); this.RemoveCenterOfMass(); this.ShowControlPoints(); this.ShowCenterOfMass(); } FillPolygon(this.polygon_id); this.isMovingCenterOfMass = false; if (video_mode) main_media.UpdateObjectPosition(select_anno, this.x, this.y); // Set action: $('#'+this.dom_attach).unbind(); $('#'+this.dom_attach).mousedown({obj: this},function(e) { return e.data.obj.StopAdjustEvent(); }); } }; /*************** Helper functions ****************/ /** Compute center of mass for a polygon given array of points (x,y): */ this.CenterOfMass = function(x,y) { var N = x.length; // Center of mass for a single point: if(N==1) { this.center_x = x[0]; this.center_y = y[0]; return; } // The center of mass is the average polygon edge midpoint weighted by // edge length: this.center_x = 0; this.center_y = 0; var perimeter = 0; for(var i = 1; i <= N; i++) { var length = Math.round(Math.sqrt(Math.pow(x[i-1]-x[i%N], 2) + Math.pow(y[i-1]-y[i%N], 2))); this.center_x += length*Math.round((x[i-1] + x[i%N])/2); this.center_y += length*Math.round((y[i-1] + y[i%N])/2); perimeter += length; } this.center_x /= perimeter; this.center_y /= perimeter; }; this.DrawPolygon = function(dom_id,x,y,obj_name,scale) { if(x.length==1) return DrawFlag(dom_id,x[0],y[0],obj_name,scale); var attr = 'fill="none" stroke="' + HashObjectColor(obj_name) + '" stroke-width="4"'; return DrawPolygon(dom_id,x,y,obj_name,attr,scale); }; }<|fim▁end|>
return e.data.obj.StartMoveScalingPoint(e.data.point); });
<|file_name|>TimeoutManager.java<|end_file_name|><|fim▁begin|><|fim▁hole|> * Copyright (C) 2000 - 2012 Silverpeas * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * As a special exception to the terms and conditions of version 3.0 of * the GPL, you may redistribute this Program in connection with Free/Libre * Open Source Software ("FLOSS") applications as described in Silverpeas's * FLOSS exception. You should have received a copy of the text describing * the FLOSS exception, and it is also available here: * "http://www.silverpeas.org/docs/core/legal/floss_exception.html" * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.silverpeas.workflow.api; /** * The workflow engine services related to timeout (on active states) management. */ public interface TimeoutManager { /** * Initialize the timeout manager */ public void initialize(); }<|fim▁end|>
/**
<|file_name|>main.ts<|end_file_name|><|fim▁begin|>// #docregion import { enableProdMode } from '@angular/core';<|fim▁hole|>import { environment } from './environments/environment'; if (environment.production) { enableProdMode(); } platformBrowserDynamic().bootstrapModule(AppModule);<|fim▁end|>
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; import { AppModule } from './app/app.module';
<|file_name|>jet.py<|end_file_name|><|fim▁begin|>from heppy_fcc.particles.jet import Jet as BaseJet<|fim▁hole|>import math class Jet(BaseJet): def __init__(self, fccjet): self.fccjet = fccjet self._tlv = TLorentzVector() p4 = fccjet.Core().P4 self._tlv.SetXYZM(p4.Px, p4.Py, p4.Pz, p4.Mass)<|fim▁end|>
from vertex import Vertex from ROOT import TLorentzVector
<|file_name|>handlers.py<|end_file_name|><|fim▁begin|># # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import heapq, logging, os, re, socket, time, types from proton import dispatch, generate_uuid, PN_ACCEPTED, SASL, symbol, ulong, Url from proton import Collector, Connection, Delivery, Described, Endpoint, Event, Link, Terminus, Timeout from proton import Message, Handler, ProtonException, Transport, TransportException, ConnectionException from select import select class OutgoingMessageHandler(Handler): """ A utility for simpler and more intuitive handling of delivery events related to outgoing i.e. sent messages. """ def __init__(self, auto_settle=True, delegate=None): self.auto_settle = auto_settle self.delegate = delegate def on_link_flow(self, event): if event.link.is_sender and event.link.credit: self.on_sendable(event) def on_delivery(self, event): dlv = event.delivery if dlv.link.is_sender and dlv.updated: if dlv.remote_state == Delivery.ACCEPTED: self.on_accepted(event) elif dlv.remote_state == Delivery.REJECTED: self.on_rejected(event) elif dlv.remote_state == Delivery.RELEASED or dlv.remote_state == Delivery.MODIFIED: self.on_released(event) if dlv.settled: self.on_settled(event) if self.auto_settle: dlv.settle() def on_sendable(self, event): """ Called when the sender link has credit and messages can therefore be transferred. """ if self.delegate: dispatch(self.delegate, 'on_sendable', event) def on_accepted(self, event): """ Called when the remote peer accepts an outgoing message. """ if self.delegate: dispatch(self.delegate, 'on_accepted', event) def on_rejected(self, event): """ Called when the remote peer rejects an outgoing message. """ if self.delegate: dispatch(self.delegate, 'on_rejected', event) def on_released(self, event): """ Called when the remote peer releases an outgoing message. Note that this may be in response to either the RELEASE or MODIFIED state as defined by the AMQP specification. """ if self.delegate: dispatch(self.delegate, 'on_released', event) def on_settled(self, event): """ Called when the remote peer has settled the outgoing message. This is the point at which it shouod never be retransmitted. """ if self.delegate: dispatch(self.delegate, 'on_settled', event) def recv_msg(delivery): msg = Message() msg.decode(delivery.link.recv(delivery.pending)) delivery.link.advance() return msg class Reject(ProtonException): """ An exception that indicate a message should be rejected """ pass class Release(ProtonException): """ An exception that indicate a message should be rejected """ pass class Acking(object): def accept(self, delivery): """ Accepts a received message. """ self.settle(delivery, Delivery.ACCEPTED) def reject(self, delivery): """ Rejects a received message that is considered invalid or unprocessable. 
""" self.settle(delivery, Delivery.REJECTED) def release(self, delivery, delivered=True): """ Releases a received message, making it available at the source for any (other) interested receiver. The ``delivered`` parameter indicates whether this should be considered a delivery attempt (and the delivery count updated) or not. """ if delivered: self.settle(delivery, Delivery.MODIFIED) else: self.settle(delivery, Delivery.RELEASED) def settle(self, delivery, state=None): if state: delivery.update(state) delivery.settle() class IncomingMessageHandler(Handler, Acking): """ A utility for simpler and more intuitive handling of delivery events related to incoming i.e. received messages. """<|fim▁hole|> def __init__(self, auto_accept=True, delegate=None): self.delegate = delegate self.auto_accept = auto_accept def on_delivery(self, event): dlv = event.delivery if not dlv.link.is_receiver: return if dlv.readable and not dlv.partial: event.message = recv_msg(dlv) if event.link.state & Endpoint.LOCAL_CLOSED: if self.auto_accept: dlv.update(Delivery.RELEASED) dlv.settle() else: try: self.on_message(event) if self.auto_accept: dlv.update(Delivery.ACCEPTED) dlv.settle() except Reject: dlv.update(Delivery.REJECTED) dlv.settle() except Release: dlv.update(Delivery.MODIFIED) dlv.settle() elif dlv.updated and dlv.settled: self.on_settled(event) def on_message(self, event): """ Called when a message is received. The message itself can be obtained as a property on the event. For the purpose of refering to this message in further actions (e.g. if explicitly accepting it, the ``delivery`` should be used, also obtainable via a property on the event. """ if self.delegate: dispatch(self.delegate, 'on_message', event) def on_settled(self, event): if self.delegate: dispatch(self.delegate, 'on_settled', event) class EndpointStateHandler(Handler): """ A utility that exposes 'endpoint' events i.e. the open/close for links, sessions and connections in a more intuitive manner. A XXX_opened method will be called when both local and remote peers have opened the link, session or connection. This can be used to confirm a locally initiated action for example. A XXX_opening method will be called when the remote peer has requested an open that was not initiated locally. By default this will simply open locally, which then triggers the XXX_opened call. The same applies to close. 
""" def __init__(self, peer_close_is_error=False, delegate=None): self.delegate = delegate self.peer_close_is_error = peer_close_is_error @classmethod def is_local_open(cls, endpoint): return endpoint.state & Endpoint.LOCAL_ACTIVE @classmethod def is_local_uninitialised(cls, endpoint): return endpoint.state & Endpoint.LOCAL_UNINIT @classmethod def is_local_closed(cls, endpoint): return endpoint.state & Endpoint.LOCAL_CLOSED @classmethod def is_remote_open(cls, endpoint): return endpoint.state & Endpoint.REMOTE_ACTIVE @classmethod def is_remote_closed(cls, endpoint): return endpoint.state & Endpoint.REMOTE_CLOSED @classmethod def print_error(cls, endpoint, endpoint_type): if endpoint.remote_condition: logging.error(endpoint.remote_condition.description) elif cls.is_local_open(endpoint) and cls.is_remote_closed(endpoint): logging.error("%s closed by peer" % endpoint_type) def on_link_remote_close(self, event): if event.link.remote_condition: self.on_link_error(event) elif self.is_local_closed(event.link): self.on_link_closed(event) else: self.on_link_closing(event) event.link.close() def on_session_remote_close(self, event): if event.session.remote_condition: self.on_session_error(event) elif self.is_local_closed(event.session): self.on_session_closed(event) else: self.on_session_closing(event) event.session.close() def on_connection_remote_close(self, event): if event.connection.remote_condition: self.on_connection_error(event) elif self.is_local_closed(event.connection): self.on_connection_closed(event) else: self.on_connection_closing(event) event.connection.close() def on_connection_local_open(self, event): if self.is_remote_open(event.connection): self.on_connection_opened(event) def on_connection_remote_open(self, event): if self.is_local_open(event.connection): self.on_connection_opened(event) elif self.is_local_uninitialised(event.connection): self.on_connection_opening(event) event.connection.open() def on_session_local_open(self, event): if self.is_remote_open(event.session): self.on_session_opened(event) def on_session_remote_open(self, event): if self.is_local_open(event.session): self.on_session_opened(event) elif self.is_local_uninitialised(event.session): self.on_session_opening(event) event.session.open() def on_link_local_open(self, event): if self.is_remote_open(event.link): self.on_link_opened(event) def on_link_remote_open(self, event): if self.is_local_open(event.link): self.on_link_opened(event) elif self.is_local_uninitialised(event.link): self.on_link_opening(event) event.link.open() def on_connection_opened(self, event): if self.delegate: dispatch(self.delegate, 'on_connection_opened', event) def on_session_opened(self, event): if self.delegate: dispatch(self.delegate, 'on_session_opened', event) def on_link_opened(self, event): if self.delegate: dispatch(self.delegate, 'on_link_opened', event) def on_connection_opening(self, event): if self.delegate: dispatch(self.delegate, 'on_connection_opening', event) def on_session_opening(self, event): if self.delegate: dispatch(self.delegate, 'on_session_opening', event) def on_link_opening(self, event): if self.delegate: dispatch(self.delegate, 'on_link_opening', event) def on_connection_error(self, event): if self.delegate: dispatch(self.delegate, 'on_connection_error', event) else: self.log_error(event.connection, "connection") def on_session_error(self, event): if self.delegate: dispatch(self.delegate, 'on_session_error', event) else: self.log_error(event.session, "session") event.connection.close() def 
on_link_error(self, event): if self.delegate: dispatch(self.delegate, 'on_link_error', event) else: self.log_error(event.link, "link") event.connection.close() def on_connection_closed(self, event): if self.delegate: dispatch(self.delegate, 'on_connection_closed', event) def on_session_closed(self, event): if self.delegate: dispatch(self.delegate, 'on_session_closed', event) def on_link_closed(self, event): if self.delegate: dispatch(self.delegate, 'on_link_closed', event) def on_connection_closing(self, event): if self.delegate: dispatch(self.delegate, 'on_connection_closing', event) elif self.peer_close_is_error: self.on_connection_error(event) def on_session_closing(self, event): if self.delegate: dispatch(self.delegate, 'on_session_closing', event) elif self.peer_close_is_error: self.on_session_error(event) def on_link_closing(self, event): if self.delegate: dispatch(self.delegate, 'on_link_closing', event) elif self.peer_close_is_error: self.on_link_error(event) def on_transport_tail_closed(self, event): self.on_transport_closed(event) def on_transport_closed(self, event): if self.delegate and event.connection and self.is_local_open(event.connection): dispatch(self.delegate, 'on_disconnected', event) class MessagingHandler(Handler, Acking): """ A general purpose handler that makes the proton-c events somewhat simpler to deal with and/or avoids repetitive tasks for common use cases. """ def __init__(self, prefetch=10, auto_accept=True, auto_settle=True, peer_close_is_error=False): self.handlers = [] if prefetch: self.handlers.append(CFlowController(prefetch)) self.handlers.append(EndpointStateHandler(peer_close_is_error, self)) self.handlers.append(IncomingMessageHandler(auto_accept, self)) self.handlers.append(OutgoingMessageHandler(auto_settle, self)) def on_connection_error(self, event): """ Called when the peer closes the connection with an error condition. """ EndpointStateHandler.print_error(event.connection, "connection") def on_session_error(self, event): """ Called when the peer closes the session with an error condition. """ EndpointStateHandler.print_error(event.session, "session") event.connection.close() def on_link_error(self, event): """ Called when the peer closes the link with an error condition. """ EndpointStateHandler.print_error(event.link, "link") event.connection.close() def on_reactor_init(self, event): """ Called when the event loop - the reactor - starts. """ if hasattr(event.reactor, 'subclass'): setattr(event, event.reactor.subclass.__name__.lower(), event.reactor) self.on_start(event) def on_start(self, event): """ Called when the event loop starts. (Just an alias for on_reactor_init) """ pass def on_connection_closed(self, event): """ Called when the connection is closed. """ pass def on_session_closed(self, event): """ Called when the session is closed. """ pass def on_link_closed(self, event): """ Called when the link is closed. """ pass def on_connection_closing(self, event): """ Called when the peer initiates the closing of the connection. """ pass def on_session_closing(self, event): """ Called when the peer initiates the closing of the session. """ pass def on_link_closing(self, event): """ Called when the peer initiates the closing of the link. """ pass def on_disconnected(self, event): """ Called when the socket is disconnected. """ pass def on_sendable(self, event): """ Called when the sender link has credit and messages can therefore be transferred. 
""" pass def on_accepted(self, event): """ Called when the remote peer accepts an outgoing message. """ pass def on_rejected(self, event): """ Called when the remote peer rejects an outgoing message. """ pass def on_released(self, event): """ Called when the remote peer releases an outgoing message. Note that this may be in response to either the RELEASE or MODIFIED state as defined by the AMQP specification. """ pass def on_settled(self, event): """ Called when the remote peer has settled the outgoing message. This is the point at which it shouod never be retransmitted. """ pass def on_message(self, event): """ Called when a message is received. The message itself can be obtained as a property on the event. For the purpose of refering to this message in further actions (e.g. if explicitly accepting it, the ``delivery`` should be used, also obtainable via a property on the event. """ pass class TransactionHandler(object): """ The interface for transaction handlers, i.e. objects that want to be notified of state changes related to a transaction. """ def on_transaction_declared(self, event): pass def on_transaction_committed(self, event): pass def on_transaction_aborted(self, event): pass def on_transaction_declare_failed(self, event): pass def on_transaction_commit_failed(self, event): pass class TransactionalClientHandler(MessagingHandler, TransactionHandler): """ An extension to the MessagingHandler for applications using transactions. """ def __init__(self, prefetch=10, auto_accept=False, auto_settle=True, peer_close_is_error=False): super(TransactionalClientHandler, self).__init__(prefetch, auto_accept, auto_settle, peer_close_is_error) def accept(self, delivery, transaction=None): if transaction: transaction.accept(delivery) else: super(TransactionalClientHandler, self).accept(delivery) from proton import WrappedHandler from cproton import pn_flowcontroller, pn_handshaker, pn_iohandler class CFlowController(WrappedHandler): def __init__(self, window=1024): WrappedHandler.__init__(self, lambda: pn_flowcontroller(window)) class CHandshaker(WrappedHandler): def __init__(self): WrappedHandler.__init__(self, pn_handshaker) class IOHandler(WrappedHandler): def __init__(self): WrappedHandler.__init__(self, pn_iohandler) class PythonIO: def __init__(self): self.selectables = [] self.delegate = IOHandler() def on_unhandled(self, method, event): event.dispatch(self.delegate) def on_selectable_init(self, event): self.selectables.append(event.context) def on_selectable_updated(self, event): pass def on_selectable_final(self, event): sel = event.context if sel.is_terminal: self.selectables.remove(sel) sel.release() def on_reactor_quiesced(self, event): reactor = event.reactor # check if we are still quiesced, other handlers of # on_reactor_quiesced could have produced events to process if not reactor.quiesced: return reading = [] writing = [] deadline = None for sel in self.selectables: if sel.reading: reading.append(sel) if sel.writing: writing.append(sel) if sel.deadline: if deadline is None: deadline = sel.deadline else: deadline = min(sel.deadline, deadline) if deadline is not None: timeout = deadline - time.time() else: timeout = reactor.timeout if (timeout < 0): timeout = 0 timeout = min(timeout, reactor.timeout) readable, writable, _ = select(reading, writing, [], timeout) reactor.mark() now = time.time() for s in readable: s.readable() for s in writable: s.writable() for s in self.selectables: if s.deadline and now > s.deadline: s.expired() reactor.yield_()<|fim▁end|>
<|file_name|>CamelCase.js<|end_file_name|><|fim▁begin|>import {expect} from 'chai'; import {camel_case} from '../src';<|fim▁hole|> describe('CamelCase', () => { it('should be exists.', () => { expect(camel_case).to.be.exists; }); it('should convert helloThere to HelloThere.', () => { expect(camel_case('helloThere')).to.be.equals('HelloThere'); }); it('should convert i_am_a_robot to IAmARobot.', () => { expect(camel_case('i_am_a_robot')).to.be.equals('IAmARobot'); }); });<|fim▁end|>
<|file_name|>stable-addr-of.rs<|end_file_name|><|fim▁begin|>// run-pass // Issue #2040 pub fn main() { let foo: isize = 1;<|fim▁hole|>}<|fim▁end|>
assert_eq!(&foo as *const isize, &foo as *const isize);
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![feature(clamp)] use std::{env, fs}; fn main() { let uid = unsafe { geteuid() }; if uid != 0 { panic!("UID {} is not root", uid); } let args: Vec<String> = env::args().collect(); match args.as_slice() { [_, x] => run(x), _ => help(), } } fn run(cmd: &str) { match cmd { "+" => set(percent() + 10), "-" => set(percent() - 10),<|fim▁hole|>} fn help() { println!("brightness +|-|N"); } fn max() -> i64 { read("/sys/class/backlight/intel_backlight/max_brightness") } fn current() -> i64 { read("/sys/class/backlight/intel_backlight/brightness") } fn percent() -> i64 { (100 * current()) / max() } fn set(p: i64) { let p = p.clamp(5, 100); write(p * max() / 100) } fn read(path: &str) -> i64 { fs::read_to_string(path) .unwrap_or_else(|_| panic!("Couldn't open file: {}", path)) .trim() .parse::<i64>() .unwrap_or_else(|e| panic!("Failed to read number from `{}`", e)) } fn write(b: i64) { fs::write( "/sys/class/backlight/intel_backlight/brightness", b.to_string(), ) .unwrap(); } #[link(name = "c")] extern "C" { fn geteuid() -> u32; } #[cfg(test)] mod test { use super::*; #[test] fn t_test() { assert_eq!(7500, max()); assert!(current() <= 7500); assert!(percent() < 100); } #[test] #[ignore] fn t_write() { write(5000) } }<|fim▁end|>
n if n.parse::<i64>().is_ok() => set(n.parse::<i64>().unwrap()), _ => help(), }
<|file_name|>aiplatform_v1_generated_metadata_service_create_context_sync.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for CreateContext # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-aiplatform # [START aiplatform_v1_generated_MetadataService_CreateContext_sync] from google.cloud import aiplatform_v1 def sample_create_context():<|fim▁hole|> # Initialize request argument(s) request = aiplatform_v1.CreateContextRequest( parent="parent_value", ) # Make the request response = client.create_context(request=request) # Handle the response print(response) # [END aiplatform_v1_generated_MetadataService_CreateContext_sync]<|fim▁end|>
# Create a client client = aiplatform_v1.MetadataServiceClient()
<|file_name|>Close.tsx<|end_file_name|><|fim▁begin|>import { BoostnoteIconProps, BoostnoteIconStyledContainer } from '../../lib/icons' import React from 'react' export const IconClose = (props: BoostnoteIconProps) => ( <BoostnoteIconStyledContainer> <svg width='1em' height='1em' viewBox='0 0 39 39' {...props} style={ props.size != null ? { ...props.style, width: props.size, height: props.size } : props.style } > <path d='M38.414.586a2 2 0 010 2.828L22.328 19.499l16.087 16.086a2 2 0 01-1.244 3.407l-.17.007a1.992 1.992 0 01-1.414-.586L19.499 22.327 3.414 38.414a1.99 1.99 0 01-1.244.579L2 39a2 2 0 01-1.414-3.414L16.67 19.499.587 3.414A2 2 0 011.92.001h.162c.484.02.963.214 1.333.584L19.5 16.671 35.586.586A1.994 1.994 0 0137.08.002l.16.013c.43.052.844.242 1.173.57z' fill='currentColor' fillRule='evenodd' /> </svg> </BoostnoteIconStyledContainer><|fim▁hole|>)<|fim▁end|>
<|file_name|>sale_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv<|fim▁hole|>class sale_order(osv.osv): _inherit = 'sale.order' def action_button_confirm(self, cr, uid, ids, context=None): # fetch the partner's id and subscribe the partner to the sale order assert len(ids) == 1 order = self.browse(cr, uid, ids[0], context=context) add_delivery_method = True only_service = True delivery_method = self.pool.get('delivery.carrier').search(cr, uid, [('default_in_sales', '=', True)]) if delivery_method: delivery_method = self.pool.get('delivery.carrier').browse(cr, uid, delivery_method[0]) if order.amount_untaxed < delivery_method.min_amount and not order.carrier_id: if order.partner_id.without_delivery: add_delivery_method = False else: for order_line in order.order_line: if order_line.product_id: if order_line.product_id.without_delivery: add_delivery_method = False break elif order_line.product_id.type != 'service': only_service = False if only_service: add_delivery_method = False if add_delivery_method: delivery_method = delivery_method.id self.write(cr, uid, ids[0], {'carrier_id': delivery_method}) return super(sale_order, self).action_button_confirm(cr, uid, ids, context=context)<|fim▁end|>
from openerp.tools.translate import _
<|file_name|>check.rs<|end_file_name|><|fim▁begin|>use std::error::Error; use std::fmt::{self, Debug, Display}; use futures::future::BoxFuture; use crate::client::Context; use crate::framework::standard::{Args, CommandOptions}; use crate::model::channel::Message; /// This type describes why a check has failed. /// /// **Note**: /// The bot-developer is supposed to process this `enum` as the framework is not. /// It solely serves as a way to inform a user about why a check /// has failed and for the developer to log given failure (e.g. bugs or statistics) /// occurring in [`Check`]s. #[derive(Clone, Debug)] #[non_exhaustive] pub enum Reason { /// No information on the failure. Unknown, /// Information dedicated to the user. User(String), /// Information purely for logging purposes. Log(String), /// Information for the user but also for logging purposes. UserAndLog { user: String, log: String }, } impl Error for Reason {} pub type CheckFunction = for<'fut> fn( &'fut Context, &'fut Message, &'fut mut Args, &'fut CommandOptions, ) -> BoxFuture<'fut, Result<(), Reason>>; /// A check can be part of a command or group and will be executed to /// determine whether a user is permitted to use related item. /// /// Additionally, a check may hold additional settings. pub struct Check { /// Name listed in help-system. pub name: &'static str, /// Function that will be executed. pub function: CheckFunction, /// Whether a check should be evaluated in the help-system. /// `false` will ignore check and won't fail execution. pub check_in_help: bool, /// Whether a check shall be listed in the help-system. /// `false` won't affect whether the check will be evaluated help, /// solely [`Self::check_in_help`] sets this. pub display_in_help: bool, } impl Debug for Check { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Check") .field("name", &self.name) .field("function", &"<fn>") .field("check_in_help", &self.check_in_help) .field("display_in_help", &self.display_in_help) .finish() } } impl Display for Reason { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Unknown => f.write_str("Unknown"), Self::User(reason) => write!(f, "User {}", reason), Self::Log(reason) => write!(f, "Log {}", reason), Self::UserAndLog { user, log, } => { write!(f, "UserAndLog {{user: {}, log: {}}}", user, log) }, } } } impl PartialEq for Check {<|fim▁hole|> fn eq(&self, other: &Self) -> bool { self.name == other.name } }<|fim▁end|>
<|file_name|>pages.go<|end_file_name|><|fim▁begin|>package main import ( "fmt" "net/http" "strings" ) type formField struct { Present bool Value string } func (f formField) UpdatePresent(p bool) formField { f.Present = p return f } func checkField(key string, r *http.Request) formField { var f formField f.Value = r.FormValue(key) if len(f.Value) != 0 { f.Present = true } else { f.Present = false } return f } type pageDataMin struct { TableCreated bool Title string Heading string } func newPageDataMin(title, heading string) pageDataMin { return pageDataMin{tableCreated, title, heading} } type pageData struct { pageDataMin Recs [][]string Finfo map[string]formField } func updatePageData(p *pageData, title, heading string) { p.pageDataMin = newPageDataMin(title, heading) } func root(w http.ResponseWriter, r *http.Request) { pgData := newPageDataMin("Home Page", "Welcome to the CURD Server") tmpl.ExecuteTemplate(w, "index.gohtml", pgData) } func existingTable(w http.ResponseWriter, r *http.Request) bool { if !tableCreated { http.Redirect(w, r, "/", http.StatusTemporaryRedirect) return false } return tableCreated } func createTable(w http.ResponseWriter, r *http.Request) { if tableCreated { http.Redirect(w, r, "/", http.StatusTemporaryRedirect) return } // Call the Creation function err := dbCreateTable() if err == nil { tableCreated = true pgData := newPageDataMin("Table Creation", "Table Successfully Created !") tmpl.ExecuteTemplate(w, "index.gohtml", pgData) } else if strings.Contains(err.Error(), "Error 1050") { tableCreated = true pgData := newPageDataMin("Table Creation", "Table Already Exists !") tmpl.ExecuteTemplate(w, "index.gohtml", pgData) } else { check(err) http.Redirect(w, r, "/", http.StatusTemporaryRedirect) } } func add(w http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } pgData := newPageDataMin("Add Records", "New Record") err := dbAddRecord(r) if err == nil { pgData.Heading = " Record Created Successfully" } tmpl.ExecuteTemplate(w, "add.gohtml", pgData) } func readAll(w http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } var pgData pageData var err error // Actually Read all data pgData.Recs, err = dbReadAll(r) if err == nil { updatePageData(&pgData, "Reading All Records", "All Records") tmpl.ExecuteTemplate(w, "readall.gohtml", pgData) } else { check(err) http.Redirect(w, r, "/", http.StatusTemporaryRedirect) } } func findRecord(w http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } var err error var pgData pageData wasFind := r.FormValue("submit") == "Find it" if wasFind { pgData, err = dbSearch(r) } if err != nil { check(err) http.Redirect(w, r, "/", http.StatusTemporaryRedirect) } updatePageData(&pgData, "Find Records", "Finding Records of Interest") tmpl.ExecuteTemplate(w, "find.gohtml", pgData) } func updateRecord(w http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } var err error var pgData pageData wasGet := r.FormValue("submit") == "Get" //wasUpdate := r.FormValue("submit") == "Update" // Execute the Update n, pgData, err := dbUpdateRecord(r) updatePageData(&pgData, "Record Update", "Updating Records") if err != nil { pgData.Heading += " -" + err.Error() } else { if n > 0 { if n > 1 { pgData.Heading += fmt.Sprintf("- Found %d records", n) } else { if wasGet { pgData.Heading += "- Found one" } else { pgData.Heading += "- Updated Record" } } } else { pgData.Heading = "Updating Records - Nothing Found" } } tmpl.ExecuteTemplate(w, "update.gohtml", pgData) } func deleteRecord(w 
http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } var err error<|fim▁hole|> updatePageData(&pgData, "Record Deletion", "Delete Record") if err != nil { pgData.Heading += " -" + err.Error() } else { if wasGet { pgData.Heading += " - Found one" } else { pgData.Heading += " - Successful" } } tmpl.ExecuteTemplate(w, "delete.gohtml", pgData) } func dropTable(w http.ResponseWriter, r *http.Request) { if !existingTable(w, r) { return } // Call Drop Table err := dbDropTable() if err == nil { tableCreated = false pgData := newPageDataMin("Remove Table", "Table Deleted Succesfully !") tmpl.ExecuteTemplate(w, "index.gohtml", pgData) } else { http.Redirect(w, r, "/", http.StatusTemporaryRedirect) } }<|fim▁end|>
var pgData pageData wasGet := r.FormValue("submit") == "Get" pgData, err = dbDeleteRecord(r)
<|file_name|>regionck-unboxed-closure-lifetimes.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(unboxed_closures, overloaded_calls)] use std::ops::FnMut; fn main() { let mut f; { let c = 1; let c_ref = &c; //~ ERROR `c` does not live long enough f = move |&mut: a: int, b: int| { a + b + *c_ref }; } }<|fim▁end|>
<|file_name|>background.js<|end_file_name|><|fim▁begin|>/* * Copyright 2019 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software<|fim▁hole|> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; const showGreenIconForExtensionPages = { conditions: [ new chrome.declarativeContent.PageStateMatcher({ pageUrl: { hostEquals: chrome.runtime.id, schemes: ['chrome-extension'], pathEquals: '/index.html', }, }), ], actions: [new chrome.declarativeContent.SetIcon({ path: 'icon-green.png' })], }; chrome.runtime.onInstalled.addListener(function () { chrome.declarativeContent.onPageChanged.removeRules(undefined, function () { chrome.declarativeContent.onPageChanged.addRules([ showGreenIconForExtensionPages, ]); }); }); async function loadPageInASecureSandbox({ id: tabId }) { const src = ( await new Promise((resolve) => chrome.tabs.executeScript(tabId, { file: 'getInnerHtml.js' }, resolve) ) )?.[0]; // It's possible that the chrome extension cannot read the source code, either // because it is served via a non-permitted scheme (eg `chrome-extension://`), // or bc the user/adminstrator has denied this extension access to the page. if (!src) { chrome.notifications.create(undefined, { type: 'basic', title: 'Could not sandbox this page', message: 'The extension does not have permission to modify this page.', iconUrl: 'icon-red.png', isClickable: false, eventTime: Date.now(), }); return; } const searchParams = new URLSearchParams({ src }); const url = `index.html?${searchParams.toString()}`; chrome.tabs.update({ url }); } chrome.browserAction.onClicked.addListener(loadPageInASecureSandbox);<|fim▁end|>
* distributed under the License is distributed on an "AS IS" BASIS,
<|file_name|>AwsClientBuilder.java<|end_file_name|><|fim▁begin|>/* * Copyright 2011-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.client.builder; import com.amazonaws.AmazonWebServiceClient; import com.amazonaws.ClientConfiguration; import com.amazonaws.ClientConfigurationFactory; import com.amazonaws.PredefinedClientConfigurations; import com.amazonaws.SdkClientException; import com.amazonaws.annotation.NotThreadSafe; import com.amazonaws.annotation.SdkInternalApi; import com.amazonaws.annotation.SdkProtectedApi; import com.amazonaws.annotation.SdkTestInternalApi; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.amazonaws.client.AwsAsyncClientParams; import com.amazonaws.client.AwsSyncClientParams; import com.amazonaws.monitoring.MonitoringListener; import com.amazonaws.handlers.RequestHandler2; import com.amazonaws.metrics.RequestMetricCollector; import com.amazonaws.monitoring.CsmConfigurationProvider; import com.amazonaws.monitoring.DefaultCsmConfigurationProviderChain; import com.amazonaws.regions.AwsRegionProvider; import com.amazonaws.regions.DefaultAwsRegionProviderChain; import com.amazonaws.regions.Region; import com.amazonaws.regions.RegionUtils; import com.amazonaws.regions.Regions; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; /** * Base class for all service specific client builders. * * @param <Subclass> Concrete builder type, used for better fluent methods. * @param <TypeToBuild> Type that this builder builds. */ @NotThreadSafe @SdkProtectedApi public abstract class AwsClientBuilder<Subclass extends AwsClientBuilder, TypeToBuild> { /** * Default Region Provider chain. Used only when the builder is not explicitly configured with a * region. */ private static final AwsRegionProvider DEFAULT_REGION_PROVIDER = new DefaultAwsRegionProviderChain(); /** * Different services may have custom client configuration factories to vend defaults tailored * for that service. If no explicit client configuration is provided to the builder the default * factory for the service is used. */ private final ClientConfigurationFactory clientConfigFactory; /** * {@link AwsRegionProvider} to use when no explicit region or endpointConfiguration is configured. * This is currently not exposed for customization by customers. 
*/ private final AwsRegionProvider regionProvider; private final AdvancedConfig.Builder advancedConfig = AdvancedConfig.builder(); private AWSCredentialsProvider credentials; private ClientConfiguration clientConfig; private RequestMetricCollector metricsCollector; private Region region; private List<RequestHandler2> requestHandlers; private EndpointConfiguration endpointConfiguration; private CsmConfigurationProvider csmConfig; private MonitoringListener monitoringListener; protected AwsClientBuilder(ClientConfigurationFactory clientConfigFactory) { this(clientConfigFactory, DEFAULT_REGION_PROVIDER); } @SdkTestInternalApi protected AwsClientBuilder(ClientConfigurationFactory clientConfigFactory, AwsRegionProvider regionProvider) { this.clientConfigFactory = clientConfigFactory; this.regionProvider = regionProvider; } /** * Gets the AWSCredentialsProvider currently configured in the builder. */ public final AWSCredentialsProvider getCredentials() { return this.credentials; } /** * Sets the AWSCredentialsProvider used by the client. If not specified the default is {@link * DefaultAWSCredentialsProviderChain}. * * @param credentialsProvider New AWSCredentialsProvider to use. */ public final void setCredentials(AWSCredentialsProvider credentialsProvider) { this.credentials = credentialsProvider; } /** * Sets the AWSCredentialsProvider used by the client. If not specified the default is {@link * DefaultAWSCredentialsProviderChain}. * * @param credentialsProvider New AWSCredentialsProvider to use. * @return This object for method chaining. */ public final Subclass withCredentials(AWSCredentialsProvider credentialsProvider) { setCredentials(credentialsProvider); return getSubclass(); } /** * If the builder isn't explicitly configured with credentials we use the {@link * DefaultAWSCredentialsProviderChain}. */ private AWSCredentialsProvider resolveCredentials() { return (credentials == null) ? DefaultAWSCredentialsProviderChain.getInstance() : credentials; } /** * Gets the ClientConfiguration currently configured in the builder */ public final ClientConfiguration getClientConfiguration() { return this.clientConfig; } /** * Sets the ClientConfiguration to be used by the client. If not specified the default is * typically {@link PredefinedClientConfigurations#defaultConfig} but may differ per service. * * @param config Custom configuration to use */ public final void setClientConfiguration(ClientConfiguration config) { this.clientConfig = config; } /** * Sets the ClientConfiguration to be used by the client. If not specified the default is * typically {@link PredefinedClientConfigurations#defaultConfig} but may differ per service. * * @param config Custom configuration to use * @return This object for method chaining. */ public final Subclass withClientConfiguration(ClientConfiguration config) { setClientConfiguration(config); return getSubclass(); } /** * If not explicit client configuration is provided we consult the {@link * ClientConfigurationFactory} of the service. If an explicit configuration is provided we use * ClientConfiguration's copy constructor to avoid mutation. */ private ClientConfiguration resolveClientConfiguration() { return (clientConfig == null) ? clientConfigFactory.getConfig() : new ClientConfiguration(clientConfig); } /** * Gets the {@link RequestMetricCollector} in use by the builder. */ public final RequestMetricCollector getMetricsCollector() { return this.metricsCollector; } /** * Sets a custom RequestMetricCollector to use for the client. 
* * @param metrics Custom RequestMetricCollector to use. */ public final void setMetricsCollector(RequestMetricCollector metrics) { this.metricsCollector = metrics; } /** * Sets a custom RequestMetricCollector to use for the client. * * @param metrics Custom RequestMetricCollector to use. * @return This object for method chaining. */ public final Subclass withMetricsCollector(RequestMetricCollector metrics) { setMetricsCollector(metrics); return getSubclass(); } /** * Gets the region in use by the builder. */ public final String getRegion() { return region == null ? null : region.getName(); } /** * Sets the region to be used by the client. This will be used to determine both the * service endpoint (eg: https://sns.us-west-1.amazonaws.com) and signing region (eg: us-west-1) * for requests. If neither region or endpoint configuration {@link #setEndpointConfiguration(EndpointConfiguration)} * are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * @param region Region to use */ public final void setRegion(String region) { withRegion(region); } /** * Sets the region to be used by the client. This will be used to determine both the * service endpoint (eg: https://sns.us-west-1.amazonaws.com) and signing region (eg: us-west-1) * for requests. If neither region or endpoint configuration {@link #setEndpointConfiguration(EndpointConfiguration)} * are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * <p> For regions not explicitly in the {@link Regions} enum use the {@link * #withRegion(String)} overload.</p> * * @param region Region to use * @return This object for method chaining. */ public final Subclass withRegion(Regions region) { return withRegion(region.getName()); } /** * Sets the region to be used by the client. This will be used to determine both the * service endpoint (eg: https://sns.us-west-1.amazonaws.com) and signing region (eg: us-west-1) * for requests. If neither region or endpoint configuration {@link #setEndpointConfiguration(EndpointConfiguration)} * are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * @param region Region to use * @return This object for method chaining. */ public final Subclass withRegion(String region) { return withRegion(getRegionObject(region)); } /** * Lookups the {@link Region} object for the given string region name. * * @param regionStr Region name. * @return Region object. * @throws SdkClientException If region cannot be found in the metadata. */ private Region getRegionObject(String regionStr) { Region regionObj = RegionUtils.getRegion(regionStr); if (regionObj == null) { throw new SdkClientException(String.format("Could not find region information for '%s' in SDK metadata.", regionStr)); } return regionObj; } /** * Sets the region to be used by the client. This will be used to determine both the * service endpoint (eg: https://sns.us-west-1.amazonaws.com) and signing region (eg: us-west-1) * for requests. If neither region or endpoint configuration {@link #setEndpointConfiguration(EndpointConfiguration)} * are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * @param region Region to use, this will be used to determine both service endpoint * and the signing region * @return This object for method chaining. 
*/ private Subclass withRegion(Region region) { this.region = region; return getSubclass(); } /** * Gets the service endpointConfiguration in use by the builder */ public final EndpointConfiguration getEndpoint() { return endpointConfiguration; } /** * Sets the endpoint configuration (service endpoint & signing region) to be used for requests. If neither region {@link #setRegion(String)} * or endpoint configuration are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * <p><b>Only use this if using a non-standard service endpoint - the recommended approach for configuring a client is to use {@link #setRegion(String)}</b> * * @param endpointConfiguration The endpointConfiguration to use */ public final void setEndpointConfiguration(EndpointConfiguration endpointConfiguration) { withEndpointConfiguration(endpointConfiguration); } /** * Sets the endpoint configuration (service endpoint & signing region) to be used for requests. If neither region {@link #withRegion(String)} * or endpoint configuration are explicitly provided in the builder the {@link #DEFAULT_REGION_PROVIDER} is consulted. * * <p><b>Only use this if using a non-standard service endpoint - the recommended approach for configuring a client is to use {@link #withRegion(String)}</b> * * @param endpointConfiguration The endpointConfiguration to use * @return This object for method chaining. */ public final Subclass withEndpointConfiguration(EndpointConfiguration endpointConfiguration) { this.endpointConfiguration = endpointConfiguration; return getSubclass(); } /** * Gets the list of request handlers in use by the builder. */ public final List<RequestHandler2> getRequestHandlers() { return this.requestHandlers == null ? null : Collections.unmodifiableList(this.requestHandlers); } /** * Sets the request handlers to use in the client. * * @param handlers Request handlers to use for client. */ public final void setRequestHandlers(RequestHandler2... handlers) { this.requestHandlers = Arrays.asList(handlers); } /** * Sets the request handlers to use in the client. * * @param handlers Request handlers to use for client.<|fim▁hole|> return getSubclass(); } /** * Gets the {@link MonitoringListener} in use by the builder. */ public final MonitoringListener getMonitoringListener() { return this.monitoringListener; } /** * Sets a custom MonitoringListener to use for the client. * * @param monitoringListener Custom Monitoring Listener to use. */ public final void setMonitoringListener(MonitoringListener monitoringListener) { this.monitoringListener = monitoringListener; } /** * Sets a custom MonitoringListener to use for the client. * * @param monitoringListener Custom MonitoringListener to use. * @return This object for method chaining. */ public final Subclass withMonitoringListener(MonitoringListener monitoringListener) { setMonitoringListener(monitoringListener); return getSubclass(); } /** * Request handlers are copied to a new list to avoid mutation, if no request handlers are * provided to the builder we supply an empty list. */ private List<RequestHandler2> resolveRequestHandlers() { return (requestHandlers == null) ? 
new ArrayList<RequestHandler2>() : new ArrayList<RequestHandler2>(requestHandlers); } public CsmConfigurationProvider getClientSideMonitoringConfigurationProvider() { return csmConfig; } public void setClientSideMonitoringConfigurationProvider(CsmConfigurationProvider csmConfig) { this.csmConfig = csmConfig; } public Subclass withClientSideMonitoringConfigurationProvider( CsmConfigurationProvider csmConfig) { setClientSideMonitoringConfigurationProvider(csmConfig); return getSubclass(); } private CsmConfigurationProvider resolveClientSideMonitoringConfig() { return csmConfig == null ? DefaultCsmConfigurationProviderChain.getInstance() : csmConfig; } /** * Get the current value of an advanced config option. * @param key Key of value to get. * @param <T> Type of value to get. * @return Value if set, otherwise null. */ protected final <T> T getAdvancedConfig(AdvancedConfig.Key<T> key) { return advancedConfig.get(key); } /** * Sets the value of an advanced config option. * @param key Key of value to set. * @param value The new value. * @param <T> Type of value. */ protected final <T> void putAdvancedConfig(AdvancedConfig.Key<T> key, T value) { advancedConfig.put(key, value); } /** * Region and endpoint logic is tightly coupled to the client class right now so it's easier to * set them after client creation and let the normal logic kick in. Ideally this should resolve * the endpoint and signer information here and just pass that information as is to the client. * * @param clientInterface Client to configure */ @SdkInternalApi final TypeToBuild configureMutableProperties(TypeToBuild clientInterface) { AmazonWebServiceClient client = (AmazonWebServiceClient) clientInterface; setRegion(client); client.makeImmutable(); return clientInterface; } /** * Builds a client with the configure properties. * * @return Client instance to make API calls with. */ public abstract TypeToBuild build(); /** * @return An instance of AwsSyncClientParams that has all params to be used in the sync client * constructor. */ protected final AwsSyncClientParams getSyncClientParams() { return new SyncBuilderParams(); } protected final AdvancedConfig getAdvancedConfig() { return advancedConfig.build(); } private void setRegion(AmazonWebServiceClient client) { if (region != null && endpointConfiguration != null) { throw new IllegalStateException("Only one of Region or EndpointConfiguration may be set."); } if (endpointConfiguration != null) { client.setEndpoint(endpointConfiguration.getServiceEndpoint()); client.setSignerRegionOverride(endpointConfiguration.getSigningRegion()); } else if (region != null) { client.setRegion(region); } else { final String region = determineRegionFromRegionProvider(); if (region != null) { client.setRegion(getRegionObject(region)); } else { throw new SdkClientException( "Unable to find a region via the region provider chain. " + "Must provide an explicit region in the builder or setup environment to supply a region."); } } } /** * Attempt to determine the region from the configured region provider. This will return null in the event that the * region provider could not determine the region automatically. */ private String determineRegionFromRegionProvider() { try { return regionProvider.getRegion(); } catch (SdkClientException e) { // The AwsRegionProviderChain that is used by default throws an exception instead of returning null when // the region is not defined. For that reason, we have to support both throwing an exception and returning // null as the region not being defined. 
return null; } } @SuppressWarnings("unchecked") protected final Subclass getSubclass() { return (Subclass) this; } /** * Presents a view of the builder to be used in a client constructor. */ protected class SyncBuilderParams extends AwsAsyncClientParams { private final ClientConfiguration _clientConfig; private final AWSCredentialsProvider _credentials; private final RequestMetricCollector _metricsCollector; private final List<RequestHandler2> _requestHandlers; private final CsmConfigurationProvider _csmConfig; private final MonitoringListener _monitoringListener; private final AdvancedConfig _advancedConfig; protected SyncBuilderParams() { this._clientConfig = resolveClientConfiguration(); this._credentials = resolveCredentials(); this._metricsCollector = metricsCollector; this._requestHandlers = resolveRequestHandlers(); this._csmConfig = resolveClientSideMonitoringConfig(); this._monitoringListener = monitoringListener; this._advancedConfig = advancedConfig.build(); } @Override public AWSCredentialsProvider getCredentialsProvider() { return this._credentials; } @Override public ClientConfiguration getClientConfiguration() { return this._clientConfig; } @Override public RequestMetricCollector getRequestMetricCollector() { return this._metricsCollector; } @Override public List<RequestHandler2> getRequestHandlers() { return this._requestHandlers; } @Override public CsmConfigurationProvider getClientSideMonitoringConfigurationProvider() { return this._csmConfig; } @Override public MonitoringListener getMonitoringListener() { return this._monitoringListener; } @Override public AdvancedConfig getAdvancedConfig() { return _advancedConfig; } @Override public ExecutorService getExecutor() { throw new UnsupportedOperationException("ExecutorService is not used for sync client."); } } /** * A container for configuration required to submit requests to a service (service endpoint and signing region) */ public static final class EndpointConfiguration { private final String serviceEndpoint; private final String signingRegion; /** * @param serviceEndpoint the service endpoint either with or without the protocol (e.g. https://sns.us-west-1.amazonaws.com or sns.us-west-1.amazonaws.com) * @param signingRegion the region to use for SigV4 signing of requests (e.g. us-west-1) */ public EndpointConfiguration(String serviceEndpoint, String signingRegion) { this.serviceEndpoint = serviceEndpoint; this.signingRegion = signingRegion; } public String getServiceEndpoint() { return serviceEndpoint; } public String getSigningRegion() { return signingRegion; } } }<|fim▁end|>
* @return This object for method chaining. */ public final Subclass withRequestHandlers(RequestHandler2... handlers) { setRequestHandlers(handlers);
<|file_name|>controller.js<|end_file_name|><|fim▁begin|>angular.module('app', [])<|fim▁hole|><|fim▁end|>
.controller('Controller', function($scope) { $scope.hi = 'world'; });
<|file_name|>cache_configuration.js<|end_file_name|><|fim▁begin|>'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports.flushCacheConfiguration = exports.getCacheConfiguration = exports.loadCacheConfigurations = exports.setCacheConfiguration = undefined; var _stringify = require('babel-runtime/core-js/json/stringify'); var _stringify2 = _interopRequireDefault(_stringify); var _keys = require('babel-runtime/core-js/object/keys'); var _keys2 = _interopRequireDefault(_keys); var _typeof2 = require('babel-runtime/helpers/typeof'); var _typeof3 = _interopRequireDefault(_typeof2); var _promise = require('babel-runtime/core-js/promise'); var _promise2 = _interopRequireDefault(_promise); <|fim▁hole|> var _constants = require('../constants'); var _constants2 = _interopRequireDefault(_constants); var _stringToJson = require('string-to-json'); var _stringToJson2 = _interopRequireDefault(_stringToJson); var _flat = require('flat'); var _flat2 = _interopRequireDefault(_flat); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } // var settleVersioning = function (original, update) { // if (!original.versions) return true; // if (typeof original.versions.theme !== 'string' || typeof original.versions.reactadmin !== 'string') return true; // if (!update.versions) return true; // let themeOutofDate = (typeof update.versions.theme === 'string') ? semver.lt(original.versions.theme, update.versions.theme) : false; // let reactadminOutofDate = (typeof update.versions.reactadmin === 'string') ? semver.lt(original.versions.reactadmin, update.versions.reactadmin) : false; // return (themeOutofDate || reactadminOutofDate); // }; // var handleConfigurationAssigment = function (original, update) { // if (original && settleVersioning(original, update)) { // original = Object.assign({}, update); // } else if (!original) { // original = Object.assign({}, update); // } // return original; // }; // var handleConfigurationVersioning = function (data, type, options = {}) { // if (!type) throw new Error('Configurations must have a specified type'); // let configuration; // try { // configuration = JSON.parse(data.configuration) || {}; // } catch (e) { // configuration = {}; // } // configuration = flatten(configuration || {}, { safe: true, maxDepth: options.depth || 2, }); // if (options.multi === true) { // if (typeof type === 'string') { // configuration[type] = Object.keys(data).reduce((result, key) => { // result[key] = handleConfigurationAssigment(result[key], Object.assign(data[key].data.settings, { versions: data.versions, })); // return result; // }, configuration[type] || {}); // } else if (type && typeof type === 'object') { // configuration = Object.keys(data).reduce((result, key) => { // if (type[key]) result[type[key]] = handleConfigurationAssigment(result[type[key]], Object.assign(data[key].data.settings, { versions: data.versions, })); // return result; // }, configuration || {}); // } // } else { // configuration[type] = handleConfigurationAssigment(configuration[type], Object.assign(data.settings, { versions: data.versions, })); // } // return str2json.convert(configuration); // }; // import semver from 'semver'; var setCacheConfiguration = exports.setCacheConfiguration = function setCacheConfiguration(fn, type) { var options = arguments.length > 2 && arguments[2] !== undefined ? 
arguments[2] : {}; return function () { var invoked = fn.apply(undefined, arguments); // if (invoked && typeof invoked.then === 'function' && typeof invoked.catch === 'function') { // return invoked // .then(result => { // let settings = result.data.settings; // return AsyncStorage.getItem(constants.cache.CONFIGURATION_CACHE) // .then(_result => { // _result = { configuration: _result, versions: result.data.versions, }; // if (options.multi) return Object.assign(_result, settings); // return Object.assign(_result, { settings: settings, }); // }, e => Promise.reject(e)); // }) // .then(result => handleConfigurationVersioning(result, type, options)) // .then(result => { // // console.log({ type, result, }); // return AsyncStorage.setItem(constants.cache.CONFIGURATION_CACHE, JSON.stringify(result)) // .then(() => result, e => Promise.reject(e)); // }) // .catch(e => Promise.reject(e)); // } return invoked; }; }; var loadCacheConfigurations = exports.loadCacheConfigurations = function loadCacheConfigurations() { return _serverSideReactNative.AsyncStorage.getItem(_constants2.default.cache.CONFIGURATION_CACHE).then(function (result) { try { return JSON.parse(result) || {}; } catch (e) { return {}; } }).catch(function (e) { return _promise2.default.reject(e); }); }; var getCacheConfiguration = exports.getCacheConfiguration = function getCacheConfiguration() { var actions = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; return function (dispatch) { return _serverSideReactNative.AsyncStorage.getItem(_constants2.default.cache.CONFIGURATION_CACHE).then(function (result) { try { return JSON.parse(result) || {}; } catch (e) { return {}; } }).then(function (result) { if (result.manifest) { if (result.manifest.authenticated && actions.manifest && actions.manifest.receivedManifestData) dispatch(actions.manifest.receivedManifestData(result.manifest.authenticated)); if (result.manifest.unauthenticated && actions.manifest && actions.manifest.unauthenticatedReceivedManifestData) dispatch(actions.manifest.unauthenticatedReceivedManifestData(result.manifest.unauthenticated)); } if (result.user) { if (result.user.preferences && actions.user && actions.user.preferenceSuccessResponse) { dispatch(actions.user.preferenceSuccessResponse({ data: { settings: result.user.preferences } })); } if (result.user.navigation && actions.user && actions.user.navigationSuccessResponse) { dispatch(actions.user.navigationSuccessResponse({ data: { settings: result.user.navigation } })); } } if (result.components) { if (result.components.login && actions.components && actions.components.setLoginComponent) { dispatch(actions.components.setLoginComponent({ data: { settings: result.components.login } })); } if (result.components.error && actions.components && actions.components.setErrorComponent) { dispatch(actions.components.setErrorComponent({ data: { settings: result.components.error } })); } if (result.components.main && actions.components && actions.components.setMainComponent) { dispatch(actions.components.setMainComponent({ data: { settings: result.components.main } })); } } }).catch(function (e) { return _promise2.default.reject(e); }); }; }; var flushCacheConfiguration = exports.flushCacheConfiguration = function flushCacheConfiguration(toRemove) { if (!toRemove) return _serverSideReactNative.AsyncStorage.removeItem(_constants2.default.cache.CONFIGURATION_CACHE); return _serverSideReactNative.AsyncStorage.getItem(_constants2.default.cache.CONFIGURATION_CACHE).then(function (result) { try { return (0, 
_flat2.default)(JSON.parse(result), { safe: true, maxDepth: 2 }) || {}; } catch (e) { return {}; } }).then(function (result) { if (typeof toRemove === 'string' && result[toRemove]) delete result[toRemove];else if (toRemove && (typeof toRemove === 'undefined' ? 'undefined' : (0, _typeof3.default)(toRemove)) === 'object') { (0, _keys2.default)(toRemove).forEach(function (key) { if (result[toRemove[key]]) delete result[toRemove[key]]; }); } return _serverSideReactNative.AsyncStorage.setItem(_constants2.default.cache.CONFIGURATION_CACHE, (0, _stringify2.default)(_stringToJson2.default.convert(result))); }).catch(function (e) { return _promise2.default.reject(e); }); };<|fim▁end|>
var _serverSideReactNative = require('./server-side-react-native');
<|file_name|>bitcoin_gl.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="gl" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About BillaryCoin</source> <translation>Acerca de BillaryCoin</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;BillaryCoin&lt;/b&gt; version</source> <translation>Versión &lt;b&gt;BillaryCoin&lt;/b&gt; .</translation> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The BillaryCoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Isto é software experimental. Distribuído baixo a licencia de software MIT/X11, véxase o arquivo que acompaña COPYING ou http://www.opensource.org/licenses/mit-license.php. Este produto inclúe software desenvolvido polo OpenSSL Project para o uso no OpenSSL Toolkit (http://www.openssl.org/) e software criptográfico escrito por Eric Young ([email protected]) e software UPnP escrito por Thomas Bernard.</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Libreta de direccións</translation> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Doble click para editar a dirección ou a etiqueta</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Crear unha nova dirección</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copiar a dirección seleccionada ao cartafol</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>&amp;Nova dirección.</translation> </message> <message> <location line="-46"/> <source>These are your BillaryCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Estas son as túas dIreccións de BillaryCoin para recibir os pagos. 
Pode que quieras asignarlle unha a cada remitente e así reconocer quen te está a pagar.</translation> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation>&amp;Copiar Dirección</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>Amosar &amp;QR Code</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a BillaryCoin address</source> <translation>Firma a mensaxe para probar que tes unha dirección BillaryCoin</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Firmar &amp;Mensaxe</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Borrar a dirección actualmente seleccionada da listaxe</translation> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified BillaryCoin address</source> <translation>Verifica a mensaxe para asegurar que foi asinada por unha concreta dirección de BillaryCoin</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensaxe.</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;Borrar</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation>Copiar &amp;Etiqueta</translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation>&amp;Modificar</translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation>Exportar datos da libreta de direccións.</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Arquivo separado por comas (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Error exportando</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Non se puido escribir a arquivo %1</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Dirección</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(sen etiqueta)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Diálogo de Contrasinal</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Introduce contrasinal</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Novo contrasinal</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repite novo contrasinal</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation>Para &quot;staking&quot; só</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Introduce o novo contrasinal ao moedeiro.&lt;br/&gt;Por favor empregue un contrasinal de &lt;b&gt;10 ou máis caracteres aleatorios&lt;/b&gt;, ou &lt;b&gt;oito ou máis palabras&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Encriptar moedeiro</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Esta operación precisa o contrasinal do teu moedeiro para desbloquear o moedeiro.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Desbloquear moedeiro</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Esta operación precisa o contrasinal do teu moedeiro para desencriptar o moedeiro.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Desencriptar moedeiro</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Cambiar contrasinal</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Introduce o vello e novo contrasinais no moedeiro.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Confirmar encriptación de moedeiro</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation>Coidado: Se enctriptas a tua carteira e perdes o contrasinal, &lt;b&gt;PERDERÁS TODAS AS TÚAS MOEDAS&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Estás seguro de que desexas encriptar o teu moedeiro?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANTE: Calquera copia de seguridade previa que fixeses do teu arquivo de moedeiro debería ser substituída polo recén xerado arquivo encriptado de moedeiro. 
Por razóns de seguridade, as copias de seguridade previas de un arquivo de moedeiro desencriptado tornaránse inútiles no momento no que comeces a emprega-lo novo, encriptado, moedeiro.</translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Precaución: A tecla de Bloqueo de Maiúsculas está activada!</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Moedeiro encriptado</translation> </message> <message> <location line="-58"/> <source>BillaryCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation>BillaryCoin pecharase agora para rematar o proceso de encriptación. Recorda que encriptar a túa carteira non te protexe na totalidade do roubo das tuas moedas por infeccións de malware no teu ordenador.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Encriptación de moedeiro fallida</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>A encriptación do moedeiro fallou por mor dun erro interno. O teu moedeiro non foi encriptado.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>Os contrasinais suministrados non coinciden.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>Desbloqueo de moedeiro fallido</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>O contrasinal introducido para a desencriptación do moedeiro foi incorrecto.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Desencriptación de moedeiro fallida</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Cambiouse con éxito o contrasinal do moedeiro.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+280"/> <source>Sign &amp;message...</source> <translation>&amp;Asinar mensaxe...</translation> </message> <message> <location line="+242"/> <source>Synchronizing with network...</source> <translation>Sincronizando coa rede...</translation> </message> <message> <location line="-308"/> <source>&amp;Overview</source> <translation>&amp;Vista xeral</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Amosar vista xeral do moedeiro</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Transacciones</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Navegar historial de transaccións</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation>&amp;Libreta de Direccións</translation> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses 
and labels</source> <translation>Edita a lista de direccións e etiquetas almaceadas</translation> </message> <message> <location line="-13"/> <source>&amp;Receive coins</source> <translation>&amp;Recibe moedas</translation> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation>Amosa a lista de dirección para recibir os pagos</translation> </message> <message> <location line="-7"/> <source>&amp;Send coins</source> <translation>&amp;Enviar moedas</translation> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation>&amp;Saír</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Saír da aplicación</translation> </message> <message> <location line="+4"/> <source>Show information about BillaryCoin</source> <translation>Amosa información sobre BillaryCoin</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Acerca de &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Amosar información acerca de Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opcións...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Encriptar Moedeiro...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>Copia de &amp;Seguridade do Moedeiro...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Cambiar contrasinal...</translation> </message> <message numerus="yes"> <location line="+250"/> <source>~%n block(s) remaining</source> <translation><numerusform>~%n bloque restante</numerusform><numerusform>~%n bloques restantes</numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation>Descargado %1 de %2 bloques de historial de transaccións (%3% feito).</translation> </message> <message> <location line="-247"/> <source>&amp;Export...</source> <translation>&amp;Exportar...</translation> </message> <message> <location line="-62"/> <source>Send coins to a BillaryCoin address</source> <translation>Enviar moedas a unha dirección BillaryCoin</translation> </message> <message> <location line="+45"/> <source>Modify configuration options for BillaryCoin</source> <translation>Modificar opcións de configuración para BillaryCoin</translation> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation>Exportar datos da pestana actual a un arquivo</translation> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation>Encriptar ou desencriptar carteira</translation> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation>Facer copia de seguridade do moedeiro noutra localización</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Cambiar o contrasinal empregado para a encriptación do moedeiro</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>Ventana de &amp;Depuración</translation> </message> <message> <location line="+1"/> <source>Open debugging and 
diagnostic console</source> <translation>Abrir consola de depuración e diagnóstico</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Verificar mensaxe...</translation> </message> <message> <location line="-200"/> <source>BillaryCoin</source> <translation>BillaryCoin</translation> </message> <message> <location line="+0"/> <source>Wallet</source> <translation>Moedeiro</translation> </message> <message> <location line="+178"/> <source>&amp;About BillaryCoin</source> <translation>&amp;Sobre BillaryCoin</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Amosar/Agachar</translation> </message> <message> <location line="+9"/> <source>Unlock wallet</source> <translation>Desbloquear carteira</translation> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation>&amp;Bloquear Carteira</translation> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation>Bloquear carteira</translation> </message> <message> <location line="+34"/> <source>&amp;File</source> <translation>&amp;Arquivo</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>Axus&amp;tes</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>A&amp;xuda</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Barra de ferramentas</translation> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+0"/> <location line="+60"/> <source>BillaryCoin client</source> <translation>Cliente BillaryCoin</translation> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to BillaryCoin network</source> <translation><numerusform>%n conexión activa á red de BillaryCoin</numerusform><numerusform>%n conexións activas á red de BillaryCoin</numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks of transaction history.</source> <translation>Descargados %1 bloques do historial de transaccións.</translation> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation>&quot;Staking&quot;.&lt;br&gt;O teu peso na rede é %1&lt;br&gt;O peso da rede é %2&lt;br&gt;Tempo esperado para gañar a recompensa %3</translation> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation>Non &quot;staking&quot; porque a carteira está bloqueada</translation> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation>Non &quot;staking&quot; porque a carteira está sen conexión</translation> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation>Non &quot;staking&quot; porque a carteira está a sincronizar.</translation> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation>Non &quot;staking&quot; porque non tes moedas maduras</translation> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) 
ago</source> <translation><numerusform>Fai %n segundo</numerusform><numerusform>Fai %n segundos</numerusform></translation> </message> <message> <location line="-284"/> <source>&amp;Unlock Wallet...</source> <translation>&amp;Desbloquear Carteira...</translation> </message> <message numerus="yes"> <location line="+288"/> <source>%n minute(s) ago</source> <translation><numerusform>Fai %n minuto</numerusform><numerusform>Fai %n minutos</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation><numerusform>Fai %n hora</numerusform><numerusform>Fai %n horas</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation><numerusform>Fai %n día</numerusform><numerusform>Fai %n días</numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Actualizado</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Poñendo ao día...</translation> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation>Último bloque recibido foi generado %1.</translation> </message> <message> <location line="+59"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation>Confirmar cuota da transacción</translation> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Transacción enviada</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Transacción entrante</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Cantidade: %2 Tipo: %3 Dirección: %4 </translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! 
This can be caused by an invalid BillaryCoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>O moedeiro está &lt;b&gt;encriptado&lt;/b&gt; e actualmente &lt;b&gt;desbloqueado&lt;/b&gt;</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>O moedeiro está &lt;b&gt;encriptado&lt;/b&gt; e actualmente &lt;b&gt;bloqueado&lt;/b&gt;</translation> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation>Non &quot;staking&quot;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. 
BillaryCoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation>Alerta de Rede</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation>Cantidade:</translation> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Importe:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+48"/> <source>Fee:</source> <translation>Pago:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation>non</translation> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change:</source> <translation>Cambiar:</translation> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation>(des)selecciona todo</translation> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation>Modo árbore</translation> </message> <message> <location line="+16"/> <source>List mode</source> <translation>Modo lista</translation> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Cantidade</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Dirección</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation>Confirmacións</translation> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Confirmado</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation>Prioridade</translation> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation>Copiar dirección</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar etiqueta</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Copiar cantidade</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Copiar ID de transacción</translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation>Copiar cantidade</translation> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation>Copiar pago</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar despóis do pago</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> 
<translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar cambio</translation> </message> <message> <location line="+317"/> <source>highest</source> <translation>O máis alto</translation> </message> <message> <location line="+1"/> <source>high</source> <translation>alto</translation> </message> <message> <location line="+1"/> <source>medium-high</source> <translation>medio-alto</translation> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>low-medium</source> <translation>medio-baixo</translation> </message> <message> <location line="+1"/> <source>low</source> <translation>baixo</translation> </message> <message> <location line="+1"/> <source>lowest</source> <translation>o máis baixo</translation> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation>Si</translation> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;.<|fim▁hole|> This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(sen etiqueta)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>(change)</source> <translation>(cambio)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Modificar Dirección</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Etiqueta</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Dirección</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation>Nova dirección para recibir</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Nova dirección para enviar</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Modificar dirección para recibir</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Modificar dirección para enviar</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>A dirección introducida &quot;%1&quot; xa está no libro de direccións.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid BillaryCoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Non se puido desbloquear o moedeiro.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>A xeración de nova clave fallou.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>BillaryCoin-Qt</source> <translation>BillaryCoin-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation>Opcións UI</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Comezar minimizado</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opcións</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. 
Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Pagar &amp;tarifa da transacción</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation>Reserva</translation> </message> <message> <location line="+31"/> <source>Automatically start BillaryCoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start BillaryCoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Rede</translation> </message> <message> <location line="+6"/> <source>Automatically open the BillaryCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapear porto empregando &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the BillaryCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP do Proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Porto:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Porto do proxy (exemplo: 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Versión de SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Versión SOCKS do proxy (exemplo: 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Xanela</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Amosar so un icono na bandexa tras minimiza-la xanela.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimizar á bandexa en lugar de á barra de tarefas.</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. 
When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimizar en lugar de saír da aplicación cando se pecha a xanela. Cando se habilita esta opción, a aplicación so se pechará tras seleccionar Saír no menú.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimizar ao pechar</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Visualización</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Linguaxe de interface de usuario:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting BillaryCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unidade na que amosar as cantidades:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Escolle a unidade de subdivisión por defecto para amosar na interface e ao enviar moedas.</translation> </message> <message> <location line="+9"/> <source>Whether to show BillaryCoin addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>&amp;Visualizar direccións na listaxe de transaccións</translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Cancelar</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>por defecto</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting BillaryCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>A dirección de proxy suministrada é inválida.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulario</translation> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the BillaryCoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>Sen confirmar:</translation> </message> <message> <location line="-107"/> <source>Wallet</source> <translation>Moedeiro</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>O teu balance actualmente dispoñible</translation> </message> <message> <location line="+71"/> <source>Immature:</source> <translation>Inmaduro:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>O balance minado todavía non madurou</translation> </message> <message> <location line="+20"/> <source>Total:</source> <translation>Total:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>O teu balance actual total</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Transaccións recentes&lt;/b&gt;</translation> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation>non sincronizado</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> 
<translation>Nome do cliente</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation>N/A</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Versión do cliente</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Información</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Usar versión OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Tempo de arranque</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Rede</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Número de conexións</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Cadea de bloques</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Número actual de bloques</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Bloques totais estimados</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Hora do último bloque</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Abrir</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the BillaryCoin-Qt help message to get a list with possible BillaryCoin command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consola</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Data de construción</translation> </message> <message> <location line="-104"/> <source>BillaryCoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>BillaryCoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Arquivo de log de depuración</translation> </message> <message> <location line="+7"/> <source>Open the BillaryCoin debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Limpar consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the BillaryCoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Emprega as flechas arriba e abaixo para navegar polo historial, e &lt;b&gt;Ctrl-L&lt;/b&gt; para limpar a pantalla.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Escribe &lt;b&gt;axuda&lt;/b&gt; para unha vista xeral dos comandos dispoñibles.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Moedas Enviadas</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation>Cantidade:</translation> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>Importe:</translation> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 BC</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation>Prioridade:</translation> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation>Pago:</translation> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Enviar a múltiples receptores á vez</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Engadir &amp;Receptor</translation> </message> <message> <location line="+20"/> <source>Remove all transaction 
fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Limpar &amp;Todo</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation>Balance:</translation> </message> <message> <location line="+16"/> <source>123.456 BC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Confirmar a acción de envío</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Enviar</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a BillaryCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation>Copiar cantidade</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar cantidade</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation>Copiar pago</translation> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation>Copiar despóis do pago</translation> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation>Copiar prioridade</translation> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation>Copiar cambio</translation> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirmar envío de moedas</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>A dirección de recepción non é válida, por favor compróbea.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>A cantidade a pagar debe ser maior que 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>A cantidade sobrepasa o teu balance.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>O total sobrepasa o teu balance cando se inclúe a tarifa de transacción %1.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Atopouse dirección duplicada, so se pode enviar a cada dirección unha vez por operación.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid BillaryCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(sen etiqueta)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>&amp;Cantidade:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>Pagar &amp;A:</translation> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation>Introduce unha etiqueta para esta dirección para engadila ao teu libro de direccións</translation> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation>&amp;Etiqueta:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Pegar dirección dende portapapeis</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a BillaryCoin address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Sinaturas - Asinar / Verificar unha Mensaxe</translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>&amp;Asinar Mensaxe</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Podes asinar mensaxes coas túas direccións para probar que ti as posees. Ten conta de non asinar nada vago, xa que hai ataques de phishing que tentarán que asines coa túa identidade por riba deles. Asina únicamente declaracións totalmente detalladas coas que esteas de acordo.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Pegar dirección dende portapapeis</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Introduce a mensaxe que queres asinar aquí</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiar a sinatura actual ao portapapeis do sistema</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this BillaryCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>Restaurar todos os campos de sinatura de mensaxe</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Limpar &amp;Todo</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensaxe</translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introduce a dirección coa que asinar, a mensaxe (asegúrate de copiar exactamente os saltos de liña, espacios, tabulacións, etc.) e a sinatura debaixo para verificar a mensaxe. Ten coidado de non ler máis na sinatura do que hai no mensaxe asinado mesmo, a fin de evitar ser cazado nun ataque de home no medio.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified BillaryCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Restaurar todos os campos de verificación de mensaxe</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a BillaryCoin address (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Click en &quot;Asinar Mensaxe&quot; para xerar sinatura</translation> </message> <message> <location line="+3"/> <source>Enter BillaryCoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>A dirección introducida é inválida.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Por favor comproba a dirección e proba de novo.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>A dirección introducida non se refire a ninguna clave.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Cancelouse o desbloqueo do moedeiro.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>A clave privada da dirección introducida non está dispoñible.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Fallou a sinatura da mensaxe.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mensaxe asinada.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>A sinatura non puido ser decodificada.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Por favor revise a sinatura e probe de novo.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>A sinatura non coincide co resumo da mensaxe.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>A verificación da mensaxe fallou.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mensaxe verificada.</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation>Aberto ata %1</translation> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/fóra de liña</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/sen confirmar</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmacións</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Estado</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, 
propagado a %n nodo</numerusform><numerusform>, propagado a %n nodos</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Orixe</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Xerado</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Dende</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>A</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>dirección propia</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>etiqueta</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Crédito</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>madura nun bloque máis</numerusform><numerusform>madura en %n bloques máis</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>non aceptado</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Débito</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Tarifa de transacción</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Cantidade neta</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mensaxe</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentario</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID de Transacción</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 20 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Información de depuración</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transacción</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>Entradas</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Cantidade</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>verdadeiro</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>falso</translation> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation>, non foi propagado con éxito todavía</translation> </message> <message> <location line="+35"/> <source>unknown</source> <translation>descoñecido</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detalles de transacción</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Este panel amosa unha descripción detallada da transacción</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Dirección</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Cantidade</translation> </message> <message> <location line="+60"/> <source>Open until %1</source> <translation>Aberto ata %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmado (%1 confirmacións)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation><numerusform>Abrir para %n bloque máis</numerusform><numerusform>Abrir para %n bloques máis</numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Este bloque non foi recibido por ningún outro nodo e probablemente non será aceptado!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Xerado pero non aceptado</translation> </message> <message> <location line="+42"/> <source>Received 
with</source> <translation>Recibido con</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Recibido de</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Enviado a</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Pago a ti mesmo</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Minado</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/a)</translation> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Estado da transacción. Pasa por riba deste campo para amosar o número de confirmacións.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data e hora na que foi recibida a transacción.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipo de transacción.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Dirección de destino da transacción.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Cantidade borrada ou engadida no balance.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation>Todo</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Hoxe</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Esta semana</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Este mes</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>O último mes</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Este ano</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Periodo...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Recibido con</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Enviado a</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>A ti mesmo</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Minado</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Outro</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Introduce dirección ou etiqueta para buscar</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Cantidade mínima</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiar dirección</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Copiar etiqueta</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar cantidade</translation> </message> <message> <location line="+1"/> <source>Copy 
transaction ID</source> <translation>Copiar ID de transacción</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Modificar etiqueta</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Amosar detalles da transacción</translation> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Arquivo separado por comas (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmado</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Dirección</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Cantidade</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Periodo:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>a</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>BillaryCoin version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>Emprego:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or billarycoind</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation>Listar comandos</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>Obter axuda para un comando</translation> </message> <message> <location line="+2"/> <source>Options:</source> <translation>Opcións:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: billarycoin.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: billarycoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation>Especificar arquivo do moedeiro (dentro do directorio de datos)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Especificar directorio de datos</translation> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Fixar tamaño da caché da base de datos en megabytes (por defecto: 25)</translation> </message> 
<message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 6791 or testnet: 16791)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Manter como moito &lt;n&gt; conexións con pares (por defecto: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Conectar a nodo para recuperar direccións de pares, e desconectar</translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation>Especificar a túa propia dirección pública</translation> </message> <message> <location line="+5"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Umbral para desconectar pares con mal comportamento (por defecto: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Número de segundos para manter sen reconectar aos pares con mal comportamento (por defecto: 86400)</translation> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Ocorreu un erro mentres se establecía o porto RPC %u para escoitar sobre IPv4: %s</translation> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 6792 or testnet: 16792)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation>Aceptar liña de comandos e comandos JSON-RPC</translation> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation>Executar no fondo como un demo e aceptar comandos</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>Empregar a rede de proba</translation> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Aceptar conexións de fóra (por defecto: 1 se non -proxy ou -connect)</translation> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Ocorreu un erro mentres se establecía o porto RPC %u para escoitar sobre IPv6, voltando a IPv4: %s</translation> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Precaución: -paytxfee está posto moi alto! Esta é a tarifa de transacción que ti pagarás se envías unha transacción.</translation> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong BillaryCoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Precaución: erro lendo wallet.dat! 
Tódalas claves lidas correctamente, pero os datos de transacción ou as entradas do libro de direccións podrían estar ausentes ou incorrectos.</translation> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Precaución: wallet.dat corrupto, datos salvagardados! O wallet.dat orixinal foi gardado como wallet.{timestamp}.bak en %s; se o teu balance ou transaccións son incorrectas deberías restauralas dende unha copia de seguridade.</translation> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Tentar recuperar claves privadas dende un wallet.dat corrupto</translation> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation>Opcións de creación de bloque:</translation> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation>Conectar so ao(s) nodo(s) especificado(s)</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descobrir dirección IP propia (por defecto: 1 se á escoita e non -externalip)</translation> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Fallou escoitar en calquera porto. Emprega -listen=0 se queres esto.</translation> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Máximo buffer por-conexión para recibir, &lt;n&gt;*1000 bytes (por defecto: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Máximo buffer por-conexión para enviar, &lt;n&gt;*1000 bytes (por defecto: 1000)</translation> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Conectar so a nodos na rede &lt;net&gt; (IPv4, IPv6 ou Tor)</translation> </message> <message> <location line="+28"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Opcións SSL: (ver ńa Wiki Bitcoin as instrucción de configuración de SSL)</translation> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Enviar traza/información de depuración á consola en lugar de ao arquivo debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Fixar tamaño mínimo de bloque en bytes (por defecto: 0)</translation> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Recortar o arquivo debug.log ao arrancar o cliente (por defecto: 1 cando no-debug)</translation> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Especificar tempo límite da conexión en milisegundos (por defecto: 5000)</translation> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? 
</source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Usar UPnP para mapear o porto de escoita (por defecto: 0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Usar UPnP para mapear o porto de escoita (por defecto: 1 se á escoita)</translation> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation>Nome de usuario para conexións JSON-RPC</translation> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Precaución: Esta versión é obsoleta, precísase unha actualización!</translation> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corrupto, fallou o gardado</translation> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation>Contrasinal para conexións JSON-RPC</translation> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=billarycoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;BillaryCoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permitir conexións JSON-RPC dende direccións IP especificadas</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Enviar comandos a nodo executando na &lt;ip&gt; (por defecto: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Executar comando cando o mellor bloque cambie (%s no comando é sustituído polo hash do bloque)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executar comando cando unha transacción do moedeiro cambia (%s no comando é substituído por TxID)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>Actualizar moedeiro ao formato máis recente</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Fixar tamaño do pool de claves a &lt;n&gt; (por defecto: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Rescanear transaccións ausentes na cadea de bloques</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Empregar OpenSSL (https) para conexións JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>Arquivo de certificado do servidor (por defecto: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Clave privada do servidor (por defecto: server.perm)</translation> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> 
<translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation>Esta mensaxe de axuda</translation> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. BillaryCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>BillaryCoin</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Imposible enlazar con %s neste ordenador (enlace devolveu erro %d, %s)</translation> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permitir lookup de DNS para -addnote, -seednote e -connect</translation> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation>Cargando direccións...</translation> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Erro cargando wallet.dat: Moedeiro corrupto</translation> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of BillaryCoin</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart BillaryCoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>Erro cargando wallet.dat</translation> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Dirección -proxy inválida: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Rede descoñecida especificada en -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Versión solicitada de proxy -socks descoñecida: %i</translation> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Non se pode resolver a dirección -bind: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Non se pode resolver dirección -externalip: &apos;%s&apos;</translation> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Cantidade inválida para -paytxfee=&lt;cantidade&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> 
<source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>Cantidade inválida</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>Fondos insuficientes</translation> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation>Cargando índice de bloques...</translation> </message> <message> <location line="-103"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Engadir un nodo ao que conectarse e tentar manter a conexión aberta</translation> </message> <message> <location line="+122"/> <source>Unable to bind to %s on this computer. BillaryCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation>Cargando moedeiro...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>Non se pode desactualizar o moedeiro</translation> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>Non se pode escribir a dirección por defecto</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>Rescaneando...</translation> </message> <message> <location line="+5"/> <source>Done loading</source> <translation>Carga completa</translation> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation>Empregar a opción %s</translation> </message> <message> <location line="+14"/> <source>Error</source> <translation>Erro</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Debes fixar rpcpassword=&lt;contrasinal&gt; no arquivo de configuración: %s Se o arquivo non existe, debes crealo con permisos de so lectura para o propietario.</translation> </message> </context> </TS><|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># browsershots.org - Test your web design in different browsers # Copyright (C) 2007 Johann C. Rocholl <[email protected]> # # Browsershots is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Browsershots is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ ShotFactory library. """ __revision__ = "$Rev: 2006 $"<|fim▁hole|><|fim▁end|>
__date__ = "$Date: 2007-08-20 06:02:52 +0530 (Mon, 20 Aug 2007) $" __author__ = "$Author: johann $"
<|file_name|>binary_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2012-2013 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #<|fim▁hole|># See the License for the specific language governing permissions and # limitations under the License. import sparkey import tempfile import os import binascii import unittest keys = """ a7cb5f92f019fda84d5dd73c257d6f724402d56a 0fae6c3bec0e162343afee39009c8b7e7ad77747 1bff07d74a2080e1ce2b90b12f30f581f993b56f d04d6442f15527716e89d012018718d124ac5897 7b3605d73c5426f0600acd73535c1a7c96c4ffb9 23c7102024d4aeb4b641db7370083a87586dea43 3fa47cce74af2e39a67d3bf559d8ba2c81688963 280ed99d30b701b97d436b3ac57231e9e38e8a4a 6706a6c6c7ea2f4cfe1eb8dd786427675c4cbb4b a8a39e52b08763ce1610400f0e789b798e89b885 2d70d150c52804485bc04367155ae4a2ff89768f 28547a874f734dc7062c859e8409a39d7903f9f1 8906ee2fcc0f62f782a9c95557bb785e9145cc33 cec120769a81c544ff171ff21c5b66217103f038 f6a714ad3b43963fe38ab3541286f9440ae96d16 a715a608f9baf1c26e0c59c72592a2b19412270b 30f7286d1100f4c115add1df87312e00a6b71012 059c6aa8b39796b9e6c10a70ac84a209eeed3c81 f9f982ba4ea5906e455cef05036700948ed4c576 """.split('\n') class TestBinary(unittest.TestCase): def setUp(self): self.logfile = tempfile.mkstemp()[1] self.hashfile = tempfile.mkstemp()[1] def tearDown(self): os.remove(self.logfile) os.remove(self.hashfile) def test_binary(self): writer = sparkey.HashWriter(self.hashfile, self.logfile) for key in keys: writer.put(binascii.unhexlify(key), 'value') writer.close() reader = sparkey.HashReader(self.hashfile, self.logfile) for key in keys: self.assertEqual('value', reader[binascii.unhexlify(key)]) reader.close()<|fim▁end|>
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
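The test row above exercises the sparkey Python bindings end to end. As a quick reference, the round trip below is a minimal sketch distilled from that test: it uses only the calls that appear there (HashWriter, put, close, HashReader, key indexing), and it assumes the sparkey module is installed and follows the same Python-2-era str-key style as the test; file paths here are throwaway temp files, not anything from the original.

import os
import tempfile

import sparkey  # assumed installed; same binding the test above imports

# Temporary log/hash files, mirroring the test's setUp()
logfile = tempfile.mkstemp()[1]
hashfile = tempfile.mkstemp()[1]

# Write a few entries and close the writer, exactly as the test does
writer = sparkey.HashWriter(hashfile, logfile)
writer.put('greeting', 'hello')
writer.put('farewell', 'goodbye')
writer.close()

# Read them back by key, as the test does with reader[key]
reader = sparkey.HashReader(hashfile, logfile)
assert reader['greeting'] == 'hello'
reader.close()

os.remove(logfile)
os.remove(hashfile)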
<|file_name|>testSICKSlowMetropolis.cpp<|end_file_name|><|fim▁begin|>/** * @brief Tests a slow version of MCMC for Occupancy grids * @author Brian Peasley * @date ? */ #include "OccupancyGrid/MCMC.h" #include "OccupancyGrid/cvmat_serialization.h" #include "OccupancyGrid/visualiser.h" #include "OccupancyGrid/loadOccupancyGrid.h" #include <opencv2/opencv.hpp> #include <boost/program_options.hpp> namespace po = boost::program_options; using namespace std; using namespace gtsam; Visualiser global_vis_; /// Main int main(int argc, char *argv[]) { cv::namedWindow("c", cv::WINDOW_NORMAL); // parse arguments /////////////////////////////////////////// // Declare the supported options. po::options_description desc("Run dual decomposition"); desc.add_options() ("help", "produce help message") ("width", po::value<double>(), "Width") ("height", po::value<double>(), "Height") ("resolution", po::value<double>()->required(), "Size of square cell in the map") ("dir", po::value<std::string>()->default_value("Data/player_sim_with_reflectance"), "Data directory") ("clock", po::value<double>()->default_value(400), "Max clock") ; <|fim▁hole|> pos.add("resolution", 1); po::variables_map vm; po::store(po::command_line_parser(argc, argv).options(desc).positional(pos).run(), vm); po::notify(vm); std::string directory = vm["dir"].as<std::string>(); double max_clock = CLOCKS_PER_SEC * vm["clock"].as<double>(); // end of parse arguments //////////////////////////////////// OccupancyGrid occupancyGrid = loadOccupancyGrid(vm); global_vis_.init(occupancyGrid.height(), occupancyGrid.width()); //run metropolis OccupancyGrid::Marginals occupancyMarginals = runSlowMetropolis(occupancyGrid, 300000, max_clock); // write the result char marginalsOutput[1000]; sprintf(marginalsOutput, "Data/Metropolis_Marginals.txt"); FILE* fptr = fopen(marginalsOutput, "w"); fprintf(fptr, "%lu %lu\n", occupancyGrid.width(), occupancyGrid.height()); for (size_t i = 0; i < occupancyMarginals.size(); i++) { fprintf(fptr, "%lf ", occupancyMarginals[i]); } fclose(fptr); std::stringstream ss; ss << directory << "/SICKSlowMetropolis.png"; global_vis_.save(ss.str()); }<|fim▁end|>
po::positional_options_description pos;
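The C++ test above parses its command line with boost::program_options. Purely as an analogous illustration — this is Python's argparse, not the Boost API, and not part of the original program — the sketch below expresses the same option set: width, height, a required positional resolution, dir with a default, and clock defaulting to 400. The option names and defaults are taken from the C++ source; the sample invocation values are made up.

import argparse

# Analogous command line to the occupancy-grid test above, in argparse terms.
parser = argparse.ArgumentParser(description="Run dual decomposition")
parser.add_argument("--width", type=float, help="Width")
parser.add_argument("--height", type=float, help="Height")
parser.add_argument("resolution", type=float,
                    help="Size of square cell in the map")  # positional, hence required
parser.add_argument("--dir", default="Data/player_sim_with_reflectance",
                    help="Data directory")
parser.add_argument("--clock", type=float, default=400,
                    help="Max clock")

args = parser.parse_args(["0.05", "--clock", "300"])  # example invocation
print(args.resolution, args.dir, args.clock)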
<|file_name|>release.js<|end_file_name|><|fim▁begin|>module.exports = function (grunt) { var packageVersion = require('../../package.json').version; <|fim▁hole|> grunt.registerTask('updateRelease', '', function () { packageVersion = require('../../package.json').version; }); // Maintainers: Run prior to a release. Includes SauceLabs VM tests. grunt.registerTask('release', 'Release a new version, push it and publish it', function () { grunt.log.write('Welcome to the FUEL UX Release process.\n'); grunt.log.write('\n'); grunt.log.write('Please review and complete prerequisites in RELEASE.md.\n'); grunt.log.write('\n'); grunt.log.write('Please make sure you are not on VPN.\n'); grunt.log.write('\n'); // default variables for release task var releaseDefaults = { release: { files: ['dist', 'README.md', 'CONTRIBUTING.md', 'bower.json', 'package.json', 'reference/dist'], localBranch: 'release', remoteBaseBranch: 'master', remoteDestinationBranch: '3.x', remoteRepository: 'upstream' } }; // add releaseDefaults grunt.config.merge(releaseDefaults); if (!grunt.file.exists('SAUCE_API_KEY.yml')) { grunt.log.write('The file SAUCE_API_KEY.yml is needed in order to run tests in SauceLabs.'.red.bold + ' Please contact another maintainer to obtain this file.'); } if (typeof grunt.config('cdnLoginFile') === 'undefined') { grunt.log.write('The file FUEL_CDN.yml is needed in order to upload the release files to the CDN.'.red.bold + ' Please contact another maintainer to obtain this file.'); } // update local variable to make sure build prompt is using temp branch's package version grunt.task.run( [ 'prompt:logoffvpn', 'prompt:rannpminstall', 'prompt:rangrunttest', 'prompt:ransauce', 'prompt:createmilestone', 'prompt:bumpmilestones', 'prompt:closemilestone', 'prompt:startrelease', 'prompt:tempbranch', 'shell:checkoutRemoteReleaseBranch', 'updateRelease', 'prompt:build', 'dorelease' ] ); }); grunt.registerTask('dorelease', '', function () { grunt.log.writeln(''); if (!grunt.option('no-tests')) { grunt.task.run(['releasetest']); // If phantom timeouts happening because of long-running qunit tests, look into setting `resourceTimeout` in phantom: http://phantomjs.org/api/webpage/property/settings.html // Delete any screenshots that may have happened if it got this far. This isn't foolproof // because it relies on the phantomjs server/page timeout, which can take longer than this // grunt task depending on how long saucelabs takes to run... grunt.task.run('clean:screenshots'); } grunt.config('banner', '<%= bannerRelease %>'); // Run dist again to grab the latest version numbers. Yeah, we're running it twice... ¯\_(ツ)_/¯ grunt.task.run([ 'bump-only:' + grunt.config('release.buildSemVerType'), 'replace:readme', 'replace:packageJs', 'dist', 'shell:addReleaseFiles', 'shell:copyToReference', 'prompt:commit', 'prompt:tag', 'prompt:pushLocalBranchToUpstream', 'prompt:pushTagToUpstream', 'prompt:uploadToCDN', 'prompt:pushLocalBranchToUpstreamMaster', 'shell:publishToNPM', 'prompt:generatelogs' ]); }); };<|fim▁end|>
/* ------------- RELEASE ------------- */ grunt.registerTask('notes', 'Run a ruby gem that will request from Github unreleased pull requests', ['prompt:generatelogsmanually']);
<|file_name|>SolverException.java<|end_file_name|><|fim▁begin|>package edu.cmu.cs.dickerson.kpd.solver.exception; <|fim▁hole|> public SolverException(String message) { super(message); } }<|fim▁end|>
public class SolverException extends Exception { private static final long serialVersionUID = 1L;
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Unicode utilities useful for text editing, including a line breaking iterator. mod tables; use tables::*; pub fn linebreak_property(cp: char) -> u8 { let cp = cp as usize; if cp < 0x800 { LINEBREAK_1_2[cp] } else if cp < 0x10000 { let child = LINEBREAK_3_ROOT[cp >> 6]; LINEBREAK_3_CHILD[(child as usize) * 0x40 + (cp & 0x3f)] } else { let mid = LINEBREAK_4_ROOT[cp >> 12]; let leaf = LINEBREAK_4_MID[(mid as usize) * 0x40 + ((cp >> 6) & 0x3f)]; LINEBREAK_4_LEAVES[(leaf as usize) * 0x40 + (cp & 0x3f)] } } // Return property, length // May panic if ix doesn't point to a valid character in the string pub fn linebreak_property_str(s: &str, ix: usize) -> (u8, usize) { let b = s.as_bytes()[ix]; if b < 0x80 { (LINEBREAK_1_2[b as usize], 1) } else if b < 0xe0 { // 2 byte UTF-8 sequences let cp = ((b as usize) << 6) + (s.as_bytes()[ix + 1] as usize) - 0x3080; (LINEBREAK_1_2[cp], 2) } else if b < 0xf0 { // 3 byte UTF-8 sequences let mid_ix = ((b as usize) << 6) + (s.as_bytes()[ix + 1] as usize) - 0x3880; let mid = LINEBREAK_3_ROOT[mid_ix]; (LINEBREAK_3_CHILD[(mid as usize) * 0x40 + (s.as_bytes()[ix + 2] as usize) - 0x80], 3) } else { // 4 byte UTF-8 sequences let mid_ix = ((b as usize) << 6) + (s.as_bytes()[ix + 1] as usize) - 0x3c80; let mid = LINEBREAK_4_ROOT[mid_ix]; let leaf_ix = ((mid as usize) << 6) + (s.as_bytes()[ix + 2] as usize) - 0x80; let leaf = LINEBREAK_4_MID[leaf_ix]; (LINEBREAK_4_LEAVES[(leaf as usize) * 0x40 + (s.as_bytes()[ix + 3] as usize) - 0x80], 4) } } /// An iterator which produces line breaks according to the UAX 14 line /// breaking algorithm. For each break, return a tuple consisting of the offset /// within the source string and a bool indicating whether it's a hard break. #[derive(Copy, Clone)] pub struct LineBreakIterator<'a> { s: &'a str, ix: usize, state: u8, } impl<'a> Iterator for LineBreakIterator<'a> { type Item = (usize, bool); // return break pos and whether it's a hard break fn next(&mut self) -> Option<(usize, bool)> { loop { if self.ix > self.s.len() { return None; } else if self.ix == self.s.len() { // LB3, break at EOT self.ix += 1; return Some((self.s.len(), true)); } let (lb, len) = linebreak_property_str(self.s, self.ix); let i = (self.state as usize) * N_LINEBREAK_CATEGORIES + (lb as usize); let new = LINEBREAK_STATE_MACHINE[i]; //println!("\"{}\"[{}], state {} + lb {} -> {}", &self.s[self.ix..], self.ix, self.state, lb, new); let result = self.ix; self.ix += len; if (new as i8) < 0 { // break found self.state = new & 0x3f; return Some((result, new >= 0xc0)); } else { self.state = new; } } } } impl<'a> LineBreakIterator<'a> { /// Create a new iterator for the given string slice. 
pub fn new(s: &str) -> LineBreakIterator { if s.is_empty() { LineBreakIterator { s, ix: 1, // LB2, don't break; sot takes priority for empty string state: 0, } } else { let (lb, len) = linebreak_property_str(s, 0); LineBreakIterator { s, ix: len, state: lb, } } } } /// A class (TODO, not right word) useful for computing line breaks in a rope or /// other non-contiguous string representation. This is a trickier problem than /// iterating in a string for a few reasons, the trickiest of which is that in /// the general case, line breaks require an indeterminate amount of look-behind. /// /// This is something of an "expert-level" interface, and should only be used if /// the caller is prepared to respect all the invariants. Otherwise, you might /// get inconsistent breaks depending on start positiona and leaf boundaries. #[derive(Copy, Clone)] pub struct LineBreakLeafIter { ix: usize, state: u8, } impl Default for LineBreakLeafIter { // A default value. No guarantees on what happens when next() is called // on this. Intended to be useful for empty ropes. fn default() -> LineBreakLeafIter { LineBreakLeafIter { ix: 0, state: 0, } } } impl LineBreakLeafIter { /// Create a new line break iterator suitable for leaves in a rope. /// Precondition: ix references a codepoint in s (implies s is not empty). pub fn new(s: &str, ix: usize) -> LineBreakLeafIter { let (lb, len) = linebreak_property_str(s, ix); LineBreakLeafIter { ix: ix + len, state: lb, } } /// Return break pos and whether it's a hard break. Note: hard break /// indication may go away, this may not be useful in actual application. /// If end of leaf is found, return leaf's len. This does not indicate /// a break, as that requires at least one more codepoint of context. /// If it is a break, then subsequent next call will return an offset of 0. /// EOT is always a break, so in the EOT case it's up to the caller /// to figure that out. /// /// For consistent results, always supply same `s` until end of leaf is /// reached (and initially this should be the same as in the `new` call). 
pub fn next(&mut self, s: &str) -> (usize, bool) { loop { if self.ix == s.len() { self.ix = 0; // in preparation for next leaf return (s.len(), false); } let (lb, len) = linebreak_property_str(s, self.ix); let i = (self.state as usize) * N_LINEBREAK_CATEGORIES + (lb as usize); let new = LINEBREAK_STATE_MACHINE[i]; //println!("\"{}\"[{}], state {} + lb {} -> {}", &s[self.ix..], self.ix, self.state, lb, new); let result = self.ix; self.ix += len; if (new as i8) < 0 { // break found self.state = new & 0x3f; return (result, new >= 0xc0); } else { self.state = new; } } } } #[cfg(test)] mod tests { use linebreak_property; use linebreak_property_str; use LineBreakIterator; #[test] fn linebreak_prop() { assert_eq!( 9, linebreak_property('\u{0001}')); assert_eq!( 9, linebreak_property('\u{0003}')); assert_eq!( 9, linebreak_property('\u{0004}')); assert_eq!( 9, linebreak_property('\u{0008}')); assert_eq!(10, linebreak_property('\u{000D}')); assert_eq!( 9, linebreak_property('\u{0010}')); assert_eq!( 9, linebreak_property('\u{0015}')); assert_eq!( 9, linebreak_property('\u{0018}')); assert_eq!(22, linebreak_property('\u{002B}')); assert_eq!(16, linebreak_property('\u{002C}')); assert_eq!(13, linebreak_property('\u{002D}')); assert_eq!(27, linebreak_property('\u{002F}')); assert_eq!(19, linebreak_property('\u{0030}')); assert_eq!(19, linebreak_property('\u{0038}')); assert_eq!(19, linebreak_property('\u{0039}')); assert_eq!(16, linebreak_property('\u{003B}')); assert_eq!( 2, linebreak_property('\u{003E}')); assert_eq!(11, linebreak_property('\u{003F}')); assert_eq!( 2, linebreak_property('\u{0040}')); assert_eq!( 2, linebreak_property('\u{0055}')); assert_eq!( 2, linebreak_property('\u{0056}')); assert_eq!( 2, linebreak_property('\u{0058}')); assert_eq!( 2, linebreak_property('\u{0059}')); assert_eq!(20, linebreak_property('\u{005B}')); assert_eq!(22, linebreak_property('\u{005C}')); assert_eq!( 2, linebreak_property('\u{0062}')); assert_eq!( 2, linebreak_property('\u{006C}')); assert_eq!( 2, linebreak_property('\u{006D}')); assert_eq!( 2, linebreak_property('\u{0071}')); assert_eq!( 2, linebreak_property('\u{0074}')); assert_eq!( 2, linebreak_property('\u{0075}')); assert_eq!( 4, linebreak_property('\u{007C}')); assert_eq!( 9, linebreak_property('\u{009D}')); assert_eq!( 2, linebreak_property('\u{00D5}')); assert_eq!( 2, linebreak_property('\u{00D8}')); assert_eq!( 2, linebreak_property('\u{00E9}')); assert_eq!( 2, linebreak_property('\u{0120}')); assert_eq!( 2, linebreak_property('\u{0121}')); assert_eq!( 2, linebreak_property('\u{015C}')); assert_eq!( 2, linebreak_property('\u{016C}')); assert_eq!( 2, linebreak_property('\u{017E}')); assert_eq!( 2, linebreak_property('\u{01B0}')); assert_eq!( 2, linebreak_property('\u{0223}')); assert_eq!( 2, linebreak_property('\u{028D}')); assert_eq!( 2, linebreak_property('\u{02BE}')); assert_eq!( 1, linebreak_property('\u{02D0}')); assert_eq!( 9, linebreak_property('\u{0337}')); assert_eq!( 0, linebreak_property('\u{0380}')); assert_eq!( 2, linebreak_property('\u{04AA}')); assert_eq!( 2, linebreak_property('\u{04CE}')); assert_eq!( 2, linebreak_property('\u{04F1}')); assert_eq!( 2, linebreak_property('\u{0567}')); assert_eq!( 2, linebreak_property('\u{0580}')); assert_eq!( 9, linebreak_property('\u{05A1}')); assert_eq!( 9, linebreak_property('\u{05B0}')); assert_eq!(38, linebreak_property('\u{05D4}')); assert_eq!( 2, linebreak_property('\u{0643}')); assert_eq!( 9, linebreak_property('\u{065D}')); assert_eq!(19, linebreak_property('\u{066C}')); assert_eq!( 2, 
linebreak_property('\u{066E}')); assert_eq!( 2, linebreak_property('\u{068A}')); assert_eq!( 2, linebreak_property('\u{0776}')); assert_eq!( 2, linebreak_property('\u{07A2}')); assert_eq!( 0, linebreak_property('\u{07BB}')); assert_eq!(19, linebreak_property('\u{1091}')); assert_eq!(19, linebreak_property('\u{1B53}')); assert_eq!( 2, linebreak_property('\u{1EEA}')); assert_eq!(40, linebreak_property('\u{200D}')); assert_eq!(14, linebreak_property('\u{30C7}')); assert_eq!(14, linebreak_property('\u{318B}')); assert_eq!(14, linebreak_property('\u{3488}')); assert_eq!(14, linebreak_property('\u{3B6E}')); assert_eq!(14, linebreak_property('\u{475B}')); assert_eq!(14, linebreak_property('\u{490B}')); assert_eq!(14, linebreak_property('\u{5080}')); assert_eq!(14, linebreak_property('\u{7846}')); assert_eq!(14, linebreak_property('\u{7F3A}')); assert_eq!(14, linebreak_property('\u{8B51}')); assert_eq!(14, linebreak_property('\u{920F}')); assert_eq!(14, linebreak_property('\u{9731}')); assert_eq!(14, linebreak_property('\u{9F3A}')); assert_eq!( 2, linebreak_property('\u{ABD2}')); assert_eq!(19, linebreak_property('\u{ABF6}')); assert_eq!(32, linebreak_property('\u{B2EA}'));<|fim▁hole|> assert_eq!(32, linebreak_property('\u{B796}')); assert_eq!(32, linebreak_property('\u{B9E8}')); assert_eq!(32, linebreak_property('\u{BD42}')); assert_eq!(32, linebreak_property('\u{C714}')); assert_eq!(32, linebreak_property('\u{CC25}')); assert_eq!( 0, linebreak_property('\u{EA59}')); assert_eq!( 0, linebreak_property('\u{F6C8}')); assert_eq!( 0, linebreak_property('\u{F83C}')); assert_eq!( 2, linebreak_property('\u{FC6A}')); assert_eq!( 0, linebreak_property('\u{15199}')); assert_eq!( 0, linebreak_property('\u{163AC}')); assert_eq!( 0, linebreak_property('\u{1EF65}')); assert_eq!(14, linebreak_property('\u{235A7}')); assert_eq!(14, linebreak_property('\u{2E483}')); assert_eq!(14, linebreak_property('\u{2FFFA}')); assert_eq!(14, linebreak_property('\u{3613E}')); assert_eq!(14, linebreak_property('\u{3799A}')); assert_eq!( 0, linebreak_property('\u{4DD35}')); assert_eq!( 0, linebreak_property('\u{5858D}')); assert_eq!( 0, linebreak_property('\u{585C2}')); assert_eq!( 0, linebreak_property('\u{6CF38}')); assert_eq!( 0, linebreak_property('\u{7573F}')); assert_eq!( 0, linebreak_property('\u{7AABF}')); assert_eq!( 0, linebreak_property('\u{87762}')); assert_eq!( 0, linebreak_property('\u{90297}')); assert_eq!( 0, linebreak_property('\u{9D037}')); assert_eq!( 0, linebreak_property('\u{A0E65}')); assert_eq!( 0, linebreak_property('\u{B8E7F}')); assert_eq!( 0, linebreak_property('\u{BBEA5}')); assert_eq!( 0, linebreak_property('\u{BE28C}')); assert_eq!( 0, linebreak_property('\u{C1B57}')); assert_eq!( 0, linebreak_property('\u{C2011}')); assert_eq!( 0, linebreak_property('\u{CBF32}')); assert_eq!( 0, linebreak_property('\u{DD9BD}')); assert_eq!( 0, linebreak_property('\u{DF4A6}')); assert_eq!( 0, linebreak_property('\u{E923D}')); assert_eq!( 0, linebreak_property('\u{E94DB}')); assert_eq!( 0, linebreak_property('\u{F90AB}')); assert_eq!( 0, linebreak_property('\u{100EF6}')); assert_eq!( 0, linebreak_property('\u{106487}')); assert_eq!( 0, linebreak_property('\u{1064B4}')); } #[test] fn linebreak_prop_str() { assert_eq!((9, 1), linebreak_property_str(&"\u{0004}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{0005}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{0008}", 0)); assert_eq!((4, 1), linebreak_property_str(&"\u{0009}", 0)); assert_eq!((17, 1), linebreak_property_str(&"\u{000A}", 0)); assert_eq!((6, 
1), linebreak_property_str(&"\u{000C}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{000E}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{0010}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{0013}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{0017}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{001C}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{001D}", 0)); assert_eq!((9, 1), linebreak_property_str(&"\u{001F}", 0)); assert_eq!((11, 1), linebreak_property_str(&"\u{0021}", 0)); assert_eq!((23, 1), linebreak_property_str(&"\u{0027}", 0)); assert_eq!((22, 1), linebreak_property_str(&"\u{002B}", 0)); assert_eq!((13, 1), linebreak_property_str(&"\u{002D}", 0)); assert_eq!((27, 1), linebreak_property_str(&"\u{002F}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{003C}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0043}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{004B}", 0)); assert_eq!((36, 1), linebreak_property_str(&"\u{005D}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0060}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0065}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0066}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0068}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0069}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{006C}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{006D}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0077}", 0)); assert_eq!((2, 1), linebreak_property_str(&"\u{0079}", 0)); assert_eq!((4, 1), linebreak_property_str(&"\u{007C}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{008D}", 0)); assert_eq!((1, 2), linebreak_property_str(&"\u{00D7}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{015C}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{01B5}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{0216}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{0234}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{026E}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{027C}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{02BB}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{0313}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{0343}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{034A}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{0358}", 0)); assert_eq!((0, 2), linebreak_property_str(&"\u{0378}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{038C}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{03A4}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{03AC}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{041F}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{049A}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{04B4}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{04C6}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{0535}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{05B1}", 0)); assert_eq!((0, 2), linebreak_property_str(&"\u{05FF}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{065D}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{067E}", 0)); assert_eq!((19, 2), linebreak_property_str(&"\u{06F5}", 0)); assert_eq!((19, 2), linebreak_property_str(&"\u{06F6}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{0735}", 0)); assert_eq!((2, 2), linebreak_property_str(&"\u{074D}", 0)); assert_eq!((9, 2), linebreak_property_str(&"\u{07A6}", 0)); assert_eq!((0, 2), linebreak_property_str(&"\u{07B9}", 0)); assert_eq!((2, 3), 
linebreak_property_str(&"\u{131F}", 0)); assert_eq!((40, 3), linebreak_property_str(&"\u{200D}", 0)); assert_eq!((2, 3), linebreak_property_str(&"\u{25DA}", 0)); assert_eq!((2, 3), linebreak_property_str(&"\u{2C01}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{2EE5}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{4207}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{4824}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{491A}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{4C20}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{4D6A}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{50EB}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{521B}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{5979}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{5F9B}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{65AB}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{6B1F}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{7169}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{87CA}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{87FF}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{8A91}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{943A}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{9512}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{9D66}", 0)); assert_eq!((9, 3), linebreak_property_str(&"\u{A928}", 0)); assert_eq!((24, 3), linebreak_property_str(&"\u{AA7E}", 0)); assert_eq!((2, 3), linebreak_property_str(&"\u{AAEA}", 0)); assert_eq!((0, 3), linebreak_property_str(&"\u{AB66}", 0)); assert_eq!((32, 3), linebreak_property_str(&"\u{B9FC}", 0)); assert_eq!((32, 3), linebreak_property_str(&"\u{CD89}", 0)); assert_eq!((32, 3), linebreak_property_str(&"\u{CDB2}", 0)); assert_eq!((0, 3), linebreak_property_str(&"\u{F71D}", 0)); assert_eq!((14, 3), linebreak_property_str(&"\u{F9DF}", 0)); assert_eq!((2, 3), linebreak_property_str(&"\u{FEC3}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{13CC5}", 0)); assert_eq!((2, 4), linebreak_property_str(&"\u{1D945}", 0)); assert_eq!((41, 4), linebreak_property_str(&"\u{1F3C3}", 0)); assert_eq!((42, 4), linebreak_property_str(&"\u{1F3FB}", 0)); assert_eq!((14, 4), linebreak_property_str(&"\u{2BDCD}", 0)); assert_eq!((14, 4), linebreak_property_str(&"\u{3898E}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{45C35}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{4EC30}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{58EE2}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{5E3E8}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{5FB7D}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{6A564}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{6C591}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{6CA82}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{83839}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{88F47}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{91CA0}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{95644}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{AC335}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{AE8BF}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{B282B}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{B4CFC}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{BBED0}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{CCC89}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{D40EB}", 0)); assert_eq!((0, 4), 
linebreak_property_str(&"\u{D65F5}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{D8E0B}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{DF93A}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{E4E2C}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{F7935}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{F9DFF}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{1094B7}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{10C782}", 0)); assert_eq!((0, 4), linebreak_property_str(&"\u{10E4D5}", 0)); } #[test] fn lb_iter_simple() { assert_eq!(vec![(6, false), (11, true)], LineBreakIterator::new("hello world").collect::<Vec<_>>()); // LB7, LB18 assert_eq!(vec![(3, false), (4, true)], LineBreakIterator::new("a b").collect::<Vec<_>>()); // LB5 assert_eq!(vec![(2, true), (3, true)], LineBreakIterator::new("a\nb").collect::<Vec<_>>()); assert_eq!(vec![(2, true), (4, true)], LineBreakIterator::new("\r\n\r\n").collect::<Vec<_>>()); // LB8a assert_eq!(vec![(7, true)], LineBreakIterator::new("\u{200D}\u{1F3FB}").collect::<Vec<_>>()); // LB10 combining mark after space assert_eq!(vec![(2, false), (4, true)], LineBreakIterator::new("a \u{301}").collect::<Vec<_>>()); // LB15 assert_eq!(vec![(3, true)], LineBreakIterator::new("\" [").collect::<Vec<_>>()); // LB17 assert_eq!(vec![(2, false), (10, false), (11, true)], LineBreakIterator::new("a \u{2014} \u{2014} c").collect::<Vec<_>>()); // LB18 assert_eq!(vec![(2, false), (6, false), (7, true)], LineBreakIterator::new("a \"b\" c").collect::<Vec<_>>()); // LB21 assert_eq!(vec![(2, false), (3, true)], LineBreakIterator::new("a-b").collect::<Vec<_>>()); // LB21a assert_eq!(vec![(5, true)], LineBreakIterator::new("\u{05D0}-\u{05D0}").collect::<Vec<_>>()); // LB23a assert_eq!(vec![(6, true)], LineBreakIterator::new("$\u{1F3FB}%").collect::<Vec<_>>()); // LB30b assert_eq!(vec![(8, true)], LineBreakIterator::new("\u{1F466}\u{1F3FB}").collect::<Vec<_>>()); // LB31 assert_eq!(vec![(8, false), (16, true)], LineBreakIterator::new("\u{1F1E6}\u{1F1E6}\u{1F1E6}\u{1F1E6}").collect::<Vec<_>>()); } }<|fim▁end|>
assert_eq!(32, linebreak_property('\u{B3F5}'));
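linebreak_property in the Rust file above resolves a code point's class through nested index tables: the high bits (cp >> 6, cp >> 12) pick a 64-entry leaf and cp & 0x3f picks the entry inside it. The toy Python sketch below only illustrates that two-stage indexing arithmetic on made-up data (a fake property that flags ASCII digits); the real UAX 14 tables, class numbers, and table sizes are nothing like this.

# Toy two-stage lookup, illustrating the ROOT/CHILD indexing used above.
LEAF_DEFAULT = [0] * 64          # leaf 0: every code point in the block -> class 0
LEAF_DIGITS = [0] * 64           # leaf 1: same, except ASCII digits -> class 1
for cp in range(0x30, 0x3A):
    LEAF_DIGITS[cp & 0x3F] = 1

CHILD = LEAF_DEFAULT + LEAF_DIGITS   # leaves flattened into one array
ROOT = [1] + [0] * 15                # block 0 (U+0000..U+003F) uses leaf 1

def toy_property(cp):
    leaf = ROOT[cp >> 6]                    # high bits select the leaf
    return CHILD[leaf * 64 + (cp & 0x3F)]   # low 6 bits select the entry

assert toy_property(ord("7")) == 1   # digit -> class 1
assert toy_property(ord("A")) == 0   # everything else -> class 0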
<|file_name|>patch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Luis Alberto Perez Lazaro <[email protected]> # Copyright: (c) 2015, Jakub Jirutka <[email protected]> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: patch author: - Jakub Jirutka (@jirutka) - Luis Alberto Perez Lazaro (@luisperlaz) version_added: '1.9' description: - Apply patch files using the GNU patch tool. short_description: Apply patch files using the GNU patch tool options: basedir: description: - Path of a base directory in which the patch file will be applied. - May be omitted when C(dest) option is specified, otherwise required. type: path dest: description: - Path of the file on the remote machine to be patched. - The names of the files to be patched are usually taken from the patch file, but if there's just one file to be patched it can specified with this option. type: path aliases: [ originalfile ] src: description: - Path of the patch file as accepted by the GNU patch tool. If C(remote_src) is 'no', the patch source file is looked up from the module's I(files) directory. type: path required: true aliases: [ patchfile ] state: description: - Whether the patch should be applied or reverted. type: str choices: [ absent, present ] default: present version_added: "2.6" remote_src: description: - If C(no), it will search for src at originating/master machine, if C(yes) it will go to the remote/target machine for the C(src). type: bool default: no strip: description: - Number that indicates the smallest prefix containing leading slashes<|fim▁hole|> - For more information see the strip parameter of the GNU patch tool. type: int default: 0 backup: version_added: "2.0" description: - Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies. type: bool default: no binary: version_added: "2.0" description: - Setting to C(yes) will disable patch's heuristic for transforming CRLF line endings into LF. - Line endings of src and dest must match. - If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX. type: bool default: no notes: - This module requires GNU I(patch) utility to be installed on the remote host. ''' EXAMPLES = r''' - name: Apply patch to one file patch: src: /tmp/index.html.patch dest: /var/www/index.html - name: Apply patch to multiple files under basedir patch: src: /tmp/customize.patch basedir: /var/www strip: 1 - name: Revert patch to one file patch: src: /tmp/index.html.patch dest: /var/www/index.html state: absent ''' import os from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, get_platform from ansible.module_utils._text import to_native class PatchError(Exception): pass def add_dry_run_option(opts): # Older versions of FreeBSD, OpenBSD and NetBSD support the --check option only. 
if get_platform().lower() in ['openbsd', 'netbsd', 'freebsd']: opts.append('--check') else: opts.append('--dry-run') def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'): opts = ['--quiet', '--forward', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] add_dry_run_option(opts) if binary: opts.append('--binary') if dest_file: opts.append("'%s'" % dest_file) if state == 'present': opts.append('--reverse') (rc, _, _) = patch_func(opts) return rc == 0 def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'): opts = ['--quiet', '--forward', '--batch', '--reject-file=-', "--strip=%s" % strip, "--directory='%s'" % basedir, "--input='%s'" % patch_file] if dry_run: add_dry_run_option(opts) if binary: opts.append('--binary') if dest_file: opts.append("'%s'" % dest_file) if backup: opts.append('--backup --version-control=numbered') if state == 'absent': opts.append('--reverse') (rc, out, err) = patch_func(opts) if rc != 0: msg = err or out raise PatchError(msg) def main(): module = AnsibleModule( argument_spec=dict( src=dict(type='path', required=True, aliases=['patchfile']), dest=dict(type='path', aliases=['originalfile']), basedir=dict(type='path'), strip=dict(type='int', default=0), remote_src=dict(type='bool', default=False), # NB: for 'backup' parameter, semantics is slightly different from standard # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~") backup=dict(type='bool', default=False), binary=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'present']), ), required_one_of=[['dest', 'basedir']], supports_check_mode=True, ) # Create type object as namespace for module params p = type('Params', (), module.params) if not os.access(p.src, os.R_OK): module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) if p.dest and not os.access(p.dest, os.W_OK): module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest)) if p.basedir and not os.path.exists(p.basedir): module.fail_json(msg="basedir %s doesn't exist" % (p.basedir)) if not p.basedir: p.basedir = os.path.dirname(p.dest) patch_bin = module.get_bin_path('patch') if patch_bin is None: module.fail_json(msg="patch command not found") def patch_func(opts): return module.run_command('%s %s' % (patch_bin, ' '.join(opts))) # patch need an absolute file name p.src = os.path.abspath(p.src) changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state): try: apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, dry_run=module.check_mode, backup=p.backup, state=p.state) changed = True except PatchError as e: module.fail_json(msg=to_native(e), exception=format_exc()) module.exit_json(changed=changed) if __name__ == '__main__': main()<|fim▁end|>
that will be stripped from each file name found in the patch file.
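The Ansible module above ultimately shells out to GNU patch. For orientation only, here is a hedged sketch of the equivalent direct invocation: the option strings (--forward, --strip, --directory, --input, --dry-run, --reverse) are copied from the module source, but the file paths are placeholders, a GNU patch binary on PATH is assumed, and this is not the module itself (which quotes everything into a single command string and also passes --quiet --batch --reject-file=-).

import subprocess

# Hypothetical paths -- substitute your own patch file and source tree.
patch_file = "/tmp/customize.patch"
basedir = "/var/www"
strip = 1

def run_patch(extra_opts):
    """Invoke GNU patch with the option set used by the module above."""
    cmd = ["patch", "--forward", "--strip=%d" % strip,
           "--directory", basedir, "--input", patch_file] + extra_opts
    return subprocess.run(cmd, capture_output=True, text=True)

# Reversed dry run first: as in is_already_applied(), a zero exit code
# here means the patch is already in place, so there is nothing to do.
check = run_patch(["--dry-run", "--reverse"])
if check.returncode != 0:
    result = run_patch([])
    if result.returncode != 0:
        raise RuntimeError(result.stderr or result.stdout)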
<|file_name|>app.module.ts<|end_file_name|><|fim▁begin|>import { BrowserModule } from '@angular/platform-browser'; import {NgModule} from '@angular/core'; import {HttpModule} from '@angular/http'; import { HttpClientModule } from '@angular/common/http'; import {BrowserAnimationsModule} from '@angular/platform-browser/animations'; import { MdButtonModule, MdCardModule, MdIconModule,<|fim▁hole|> MdToolbarModule, MdSelectModule, MdDialogModule, MdDialog, MdTabsModule, MdSidenavModule, MdTooltipModule, MdListModule, MdSlideToggleModule } from '@angular/material'; import {FormsModule, ReactiveFormsModule} from '@angular/forms'; import {RouterModule, Routes} from '@angular/router'; import { AppComponent } from './app.component'; import {FlexLayoutModule} from '@angular/flex-layout'; import {DndModule, DragDropService, DragDropConfig} from 'ng2-dnd'; import { LoginComponent } from './login/login.component'; import { DataService } from './data.service'; import { ProfessorComponent } from './professor/professor.component'; import { StarRatingModule } from 'angular-star-rating'; import { TadetailsComponent } from './tadetails/tadetails.component'; import { StudentComponent } from './student/student.component'; import { PdfViewerComponent } from 'ng2-pdf-viewer'; import {SharedService} from "./shared.service"; import { StudentprofileComponent } from './studentprofile/studentprofile.component'; import { CommonModule } from '@angular/common'; import { StudenthomeComponent } from './studenthome/studenthome.component'; import { GpaChangeComponent } from './studentprofile/studentprofile.component'; import { loginPopComponent } from './login/login.component'; import { CourseComponent } from './course/course.component'; import { FileSelectDirective } from 'ng2-file-upload'; import { ManagerComponent } from './manager/manager.component'; import { CarouselModule } from 'angular4-carousel'; import { FileUploadService } from './file-upload.service'; export const ROUTES: Routes = [ {path: '', redirectTo: '/login', pathMatch: 'full'}, {path: 'login', component: LoginComponent}, {path: 'prof', component: ProfessorComponent}, {path: 'student', component: StudentComponent}, {path: 'manager', component: ManagerComponent}, {path: 'course/:id', component: CourseComponent} ]; @NgModule({ declarations: [ AppComponent, LoginComponent, ProfessorComponent, TadetailsComponent, StudentComponent, PdfViewerComponent, StudentprofileComponent, StudenthomeComponent, GpaChangeComponent, loginPopComponent, CourseComponent, FileSelectDirective, ManagerComponent ], imports: [ BrowserModule, BrowserAnimationsModule, HttpModule, HttpClientModule, RouterModule.forRoot(ROUTES), MdButtonModule, MdToolbarModule, MdCardModule, MdIconModule, MdTabsModule, MdTooltipModule, MdListModule, MdSlideToggleModule, FormsModule, ReactiveFormsModule, FlexLayoutModule, DndModule.forRoot(), MdMenuModule, MdSelectModule, MdDialogModule, MdSidenavModule, StarRatingModule.forRoot(), CommonModule, CarouselModule ], providers: [DragDropService, DragDropConfig, DataService, SharedService, MdDialogModule, MdDialog, MdSidenavModule, PdfViewerComponent, FileUploadService], bootstrap: [AppComponent], entryComponents: [TadetailsComponent, GpaChangeComponent, loginPopComponent] // Added for TA Info Dialog Box Issue #15 }) export class AppModule { }<|fim▁end|>
MdMenuModule,
<|file_name|>leap.js<|end_file_name|><|fim▁begin|>/** * Copyright (c) 2013, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * https://raw.github.com/facebook/regenerator/master/LICENSE file. An * additional grant of patent rights can be found in the PATENTS file in * the same directory. */ var assert = require("assert"); var types = require("ast-types"); var n = types.namedTypes; var b = types.builders; var inherits = require("util").inherits; function Entry() { assert.ok(this instanceof Entry); } function FunctionEntry(returnLoc) { Entry.call(this); n.Literal.assert(returnLoc); Object.defineProperties(this, { returnLoc: { value: returnLoc } }); } inherits(FunctionEntry, Entry); exports.FunctionEntry = FunctionEntry; function LoopEntry(breakLoc, continueLoc, label) {<|fim▁hole|> n.Literal.assert(breakLoc); n.Literal.assert(continueLoc); if (label) { n.Identifier.assert(label); } else { label = null; } Object.defineProperties(this, { breakLoc: { value: breakLoc }, continueLoc: { value: continueLoc }, label: { value: label } }); } inherits(LoopEntry, Entry); exports.LoopEntry = LoopEntry; function SwitchEntry(breakLoc) { Entry.call(this); n.Literal.assert(breakLoc); Object.defineProperties(this, { breakLoc: { value: breakLoc } }); } inherits(SwitchEntry, Entry); exports.SwitchEntry = SwitchEntry; function TryEntry(catchEntry, finallyEntry) { Entry.call(this); if (catchEntry) { assert.ok(catchEntry instanceof CatchEntry); } else { catchEntry = null; } if (finallyEntry) { assert.ok(finallyEntry instanceof FinallyEntry); } else { finallyEntry = null; } Object.defineProperties(this, { catchEntry: { value: catchEntry }, finallyEntry: { value: finallyEntry } }); } inherits(TryEntry, Entry); exports.TryEntry = TryEntry; function CatchEntry(firstLoc, paramId) { Entry.call(this); n.Literal.assert(firstLoc); n.Identifier.assert(paramId); Object.defineProperties(this, { firstLoc: { value: firstLoc }, paramId: { value: paramId } }); } inherits(CatchEntry, Entry); exports.CatchEntry = CatchEntry; function FinallyEntry(firstLoc, nextLocTempVar) { Entry.call(this); n.Literal.assert(firstLoc); n.Identifier.assert(nextLocTempVar); Object.defineProperties(this, { firstLoc: { value: firstLoc }, nextLocTempVar: { value: nextLocTempVar } }); } inherits(FinallyEntry, Entry); exports.FinallyEntry = FinallyEntry; function LeapManager(emitter) { assert.ok(this instanceof LeapManager); var Emitter = require("./emit").Emitter; assert.ok(emitter instanceof Emitter); Object.defineProperties(this, { emitter: { value: emitter }, entryStack: { value: [new FunctionEntry(emitter.finalLoc)] } }); } var LMp = LeapManager.prototype; exports.LeapManager = LeapManager; LMp.withEntry = function(entry, callback) { assert.ok(entry instanceof Entry); this.entryStack.push(entry); try { callback.call(this.emitter); } finally { var popped = this.entryStack.pop(); assert.strictEqual(popped, entry); } }; LMp._leapToEntry = function(predicate, defaultLoc) { var entry, loc; var finallyEntries = []; var skipNextTryEntry = null; for (var i = this.entryStack.length - 1; i >= 0; --i) { entry = this.entryStack[i]; if (entry instanceof CatchEntry || entry instanceof FinallyEntry) { // If we are inside of a catch or finally block, then we must // have exited the try block already, so we shouldn't consider // the next TryStatement as a handler for this throw. 
skipNextTryEntry = entry; } else if (entry instanceof TryEntry) { if (skipNextTryEntry) { // If an exception was thrown from inside a catch block and this // try statement has a finally block, make sure we execute that // finally block. if (skipNextTryEntry instanceof CatchEntry && entry.finallyEntry) { finallyEntries.push(entry.finallyEntry); } skipNextTryEntry = null; } else if ((loc = predicate.call(this, entry))) { break; } else if (entry.finallyEntry) { finallyEntries.push(entry.finallyEntry); } } else if ((loc = predicate.call(this, entry))) { break; } } if (loc) { // fall through } else if (defaultLoc) { loc = defaultLoc; } else { return null; } n.Literal.assert(loc); var finallyEntry; while ((finallyEntry = finallyEntries.pop())) { this.emitter.emitAssign(finallyEntry.nextLocTempVar, loc); loc = finallyEntry.firstLoc; } return loc; }; function getLeapLocation(entry, property, label) { var loc = entry[property]; if (loc) { if (label) { if (entry.label && entry.label.name === label.name) { return loc; } } else { return loc; } } return null; } LMp.emitBreak = function(label) { var loc = this._leapToEntry(function(entry) { return getLeapLocation(entry, "breakLoc", label); }); if (loc === null) { throw new Error("illegal break statement"); } this.emitter.clearPendingException(); this.emitter.jump(loc); }; LMp.emitContinue = function(label) { var loc = this._leapToEntry(function(entry) { return getLeapLocation(entry, "continueLoc", label); }); if (loc === null) { throw new Error("illegal continue statement"); } this.emitter.clearPendingException(); this.emitter.jump(loc); };<|fim▁end|>
Entry.call(this);
<|file_name|>user.py<|end_file_name|><|fim▁begin|># coding: utf-8 from libs.redis_storage import db1 class User(object): def __init__(self, **kwargs): pk = kwargs.get('pk') or db1.incr('new_user_id') kwargs['pk'] = pk db1.hmset('user::{}'.format(pk), kwargs) super(User, self).__setattr__('pk', pk) super(User, self).__setattr__( '__info__', db1.hgetall(self.db_key) or {} ) for k, v in self.__info__.iteritems(): self.__info__[k] = v.decode('utf-8') @property def short_info(self): return {field: getattr(self, field) for field in [ 'fio', 'sex', 'avatar', 'battles', 'wins', 'defeats', 'last_update' ]} @property def db_key(self): return 'user::{}'.format(self.pk) @property def fio(self): return u'{} {}'.format(self.last_name or u'', self.first_name or u'') @property def battles(self): return int(self.__info__.get('battles', 0)) @property def wins(self): return int(self.__info__.get('wins', 0)) @property def defeats(self): return int(self.__info__.get('defeats', 0)) @property def last_update(self): return int(self.__info__.get('last_update', 0)) def __setattr__(self, attr, value): self.__info__[attr] = value db1.hset(self.db_key, attr, value) def __getattr__(self, attr): return self.__info__.get(attr) <|fim▁hole|> def get_user_by_service(service, service_user_id): user_pk = db1.get('{}_user_id::{}'.format(service, service_user_id)) if user_pk: return User(pk=user_pk) def add_service_to_user(service, service_user_id, user_pk): db1.set('{}_user_id::{}'.format(service, service_user_id), user_pk) user = User(pk=user_pk) setattr(user, '{}_user_id'.format(service), service_user_id)<|fim▁end|>
def incr(self, attr, by=1): db1.hincrby(self.db_key, attr, by)
<|file_name|>UserString.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python ## vim:ts=4:et:nowrap """A user-defined wrapper around string objects Note: string objects have grown methods in Python 1.6 This module requires Python 1.6 or later. """ from types import StringType, UnicodeType import sys __all__ = ["UserString","MutableString"] class UserString: def __init__(self, seq): if isinstance(seq, StringType) or isinstance(seq, UnicodeType): self.data = seq elif isinstance(seq, UserString): self.data = seq.data[:] else: self.data = str(seq) def __str__(self): return str(self.data) def __repr__(self): return repr(self.data) def __int__(self): return int(self.data) def __long__(self): return long(self.data) def __float__(self): return float(self.data) def __complex__(self): return complex(self.data) def __hash__(self): return hash(self.data) def __cmp__(self, string): if isinstance(string, UserString): return cmp(self.data, string.data) else: return cmp(self.data, string) def __contains__(self, char): return char in self.data def __len__(self): return len(self.data) def __getitem__(self, index): return self.__class__(self.data[index]) def __getslice__(self, start, end): start = max(start, 0); end = max(end, 0) return self.__class__(self.data[start:end]) def __add__(self, other): if isinstance(other, UserString): return self.__class__(self.data + other.data) elif isinstance(other, StringType) or isinstance(other, UnicodeType): return self.__class__(self.data + other) else: return self.__class__(self.data + str(other)) def __radd__(self, other): if isinstance(other, StringType) or isinstance(other, UnicodeType): return self.__class__(other + self.data) else: return self.__class__(str(other) + self.data) def __iadd__(self, other): if isinstance(other, UserString): self.data += other.data elif isinstance(other, StringType) or isinstance(other, UnicodeType): self.data += other else: self.data += str(other) return self def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __imul__(self, n): self.data *= n return self # the following methods are defined in alphabetical order: def capitalize(self): return self.__class__(self.data.capitalize()) def center(self, width): return self.__class__(self.data.center(width)) def count(self, sub, start=0, end=sys.maxint): return self.data.count(sub, start, end) def encode(self, encoding=None, errors=None): # XXX improve this? 
if encoding: if errors: return self.__class__(self.data.encode(encoding, errors)) else: return self.__class__(self.data.encode(encoding)) else: return self.__class__(self.data.encode()) def endswith(self, suffix, start=0, end=sys.maxint): return self.data.endswith(suffix, start, end) def expandtabs(self, tabsize=8): return self.__class__(self.data.expandtabs(tabsize)) def find(self, sub, start=0, end=sys.maxint): return self.data.find(sub, start, end) def index(self, sub, start=0, end=sys.maxint): return self.data.index(sub, start, end) def isalpha(self): return self.data.isalpha() def isalnum(self): return self.data.isalnum() def isdecimal(self): return self.data.isdecimal() def isdigit(self): return self.data.isdigit() def islower(self): return self.data.islower() def isnumeric(self): return self.data.isnumeric()<|fim▁hole|> def isupper(self): return self.data.isupper() def join(self, seq): return self.data.join(seq) def ljust(self, width): return self.__class__(self.data.ljust(width)) def lower(self): return self.__class__(self.data.lower()) def lstrip(self): return self.__class__(self.data.lstrip()) def replace(self, old, new, maxsplit=-1): return self.__class__(self.data.replace(old, new, maxsplit)) def rfind(self, sub, start=0, end=sys.maxint): return self.data.rfind(sub, start, end) def rindex(self, sub, start=0, end=sys.maxint): return self.data.rindex(sub, start, end) def rjust(self, width): return self.__class__(self.data.rjust(width)) def rstrip(self): return self.__class__(self.data.rstrip()) def split(self, sep=None, maxsplit=-1): return self.data.split(sep, maxsplit) def splitlines(self, keepends=0): return self.data.splitlines(keepends) def startswith(self, prefix, start=0, end=sys.maxint): return self.data.startswith(prefix, start, end) def strip(self): return self.__class__(self.data.strip()) def swapcase(self): return self.__class__(self.data.swapcase()) def title(self): return self.__class__(self.data.title()) def translate(self, *args): return self.__class__(self.data.translate(*args)) def upper(self): return self.__class__(self.data.upper()) class MutableString(UserString): """mutable string objects Python strings are immutable objects. This has the advantage, that strings may be used as dictionary keys. If this property isn't needed and you insist on changing string values in place instead, you may cheat and use MutableString. But the purpose of this class is an educational one: to prevent people from inventing their own mutable string class derived from UserString and than forget thereby to remove (override) the __hash__ method inherited from ^UserString. This would lead to errors that would be very hard to track down. 
A faster and better solution is to rewrite your program using lists.""" def __init__(self, string=""): self.data = string def __hash__(self): raise TypeError, "unhashable type (it is mutable)" def __setitem__(self, index, sub): if index < 0 or index >= len(self.data): raise IndexError self.data = self.data[:index] + sub + self.data[index+1:] def __delitem__(self, index): if index < 0 or index >= len(self.data): raise IndexError self.data = self.data[:index] + self.data[index+1:] def __setslice__(self, start, end, sub): start = max(start, 0); end = max(end, 0) if isinstance(sub, UserString): self.data = self.data[:start]+sub.data+self.data[end:] elif isinstance(sub, StringType) or isinstance(sub, UnicodeType): self.data = self.data[:start]+sub+self.data[end:] else: self.data = self.data[:start]+str(sub)+self.data[end:] def __delslice__(self, start, end): start = max(start, 0); end = max(end, 0) self.data = self.data[:start] + self.data[end:] def immutable(self): return UserString(self.data) if __name__ == "__main__": # execute the regression test to stdout, if called as a script: import os called_in_dir, called_as = os.path.split(sys.argv[0]) called_in_dir = os.path.abspath(called_in_dir) called_as, py = os.path.splitext(called_as) sys.path.append(os.path.join(called_in_dir, 'test')) if '-q' in sys.argv: import test_support test_support.verbose = 0 __import__('test_' + called_as.lower())<|fim▁end|>
def isspace(self): return self.data.isspace() def istitle(self): return self.data.istitle()
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= #![doc( html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png" )] //! <p><fullname>Amazon Elastic Container Registry</fullname> <p>Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports private repositories with resource-based permissions using IAM so that specific users or Amazon EC2 instances can access repositories and images.</p></p> //! //! If you're using the service, you're probably looking for [EcrClient](struct.EcrClient.html) and [Ecr](trait.Ecr.html). mod custom; mod generated; pub use custom::*;<|fim▁hole|>pub use generated::*;<|fim▁end|>
<|file_name|>parser.js<|end_file_name|><|fim▁begin|>var REGEX = require('REGEX'), MAX_SINGLE_TAG_LENGTH = 30, create = require('DIV/create'); var parseString = function(parentTagName, htmlStr) { var parent = create(parentTagName); parent.innerHTML = htmlStr; return parent; }; <|fim▁hole|> var singleTagMatch = REGEX.singleTagMatch(htmlStr); return singleTagMatch ? [create(singleTagMatch[1])] : null; }; module.exports = function(htmlStr) { var singleTag = parseSingleTag(htmlStr); if (singleTag) { return singleTag; } var parentTagName = REGEX.getParentTagName(htmlStr), parent = parseString(parentTagName, htmlStr); var child, idx = parent.children.length, arr = Array(idx); while (idx--) { child = parent.children[idx]; parent.removeChild(child); arr[idx] = child; } parent = null; return arr.reverse(); };<|fim▁end|>
var parseSingleTag = function(htmlStr) { if (htmlStr.length > MAX_SINGLE_TAG_LENGTH) { return null; }
<|file_name|>svg.py<|end_file_name|><|fim▁begin|>class Printer: def __init__(self): self._output = '' def print_output(self, output): self._output += output def print_line(self, x1, y1, x2, y2, color=0, width=1): self.print_output(_svg_line(x1, y1, x2, y2, color=color, width=width)) def print_circle(self, x, y, r, color=0, width=1, border_color=0): self.print_output(_svg_circle(x, y, r, color=color, width=width, border_color=border_color)) def print_square(self, x, y, a, color=0, width=1, border_color=0): self.print_output(_svg_rectangle(x, y, a, a, color=color, width=width, border_color=border_color)) def print_text(self, x, y, text, color=0, font_size=12): self.print_output(_svg_text(x, y, text, color=color, font_size=font_size)) def to_file(self, filename): with open(filename, 'w') as f: f.write(str(self)) def __str__(self): return """<svg width="100%" height="100%" version="1.1" xmlns="http://www.w3.org/2000/svg"> {} </svg> """.format(self._output) def _svg_line(x1, y1, x2, y2, color, width): color = _svg_color(color) return '<line x1="{}" y1="{}" x2="{}" y2="{}" style="stroke-linecap:round;stroke:{};stroke-width:{};" />\n'.format(x1, y1, x2, y2, color, width) def _svg_circle(x, y, r, color, width, border_color): color = _svg_color(color) border_color = _svg_color(border_color) return '<circle cx="{}" cy="{}" r="{}" style="fill:{}; stroke:{}; stroke-width:{};" />\n'.format(x, y, r, color, border_color, width) 
<|fim▁hole|>def _svg_rectangle(x, y, a, b, color, width, border_color): color = _svg_color(color) border_color = _svg_color(border_color) return '<rect x="{}" y="{}" width="{}" height="{}" style="fill:{}; stroke:{}; stroke-width:{};" />\n'.format(x, y, a, b, color, border_color, width) def _svg_text(x, y, text, color, font_size): color = _svg_color(color) return '<text x="{}" y="{}" font-family="Nimbus Sans L" font-size="{}" fill="{}">{}</text>\n'.format(x, y, font_size, color, text) def _svg_color(color): if isinstance(color, str): return color return 'rgb({}, {}, {})'.format(color, color, color)<|fim▁end|>
<|file_name|>EnvironmentVariable58StateElement.py<|end_file_name|><|fim▁begin|># Copyright 2016 Casey Jaymes # This file is part of PySCAP. # # PySCAP is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.<|fim▁hole|># GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PySCAP. If not, see <http://www.gnu.org/licenses/>. import logging from scap.model.oval_5.defs.independent.StateType import StateType logger = logging.getLogger(__name__) class EnvironmentVariable58StateElement(StateType): MODEL_MAP = { 'tag_name': 'environmentvariable58_state', 'elements': [ {'tag_name': 'pid', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0, 'max': 1}, {'tag_name': 'name', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0, 'max': 1}, {'tag_name': 'value', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1}, ], }<|fim▁end|>
# # PySCAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
<|file_name|>ccl_bplist.py<|end_file_name|><|fim▁begin|>""" Copyright (c) 2012, CCL Forensics All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the CCL Forensics nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CCL FORENSICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys import os import struct import datetime __version__ = "0.11" __description__ = "Converts Apple binary PList files into a native Python data structure" __contact__ = "Alex Caithness" class BplistError(Exception): pass class BplistUID: def __init__(self, value): self.value = value def __repr__(self): return "UID: {0}".format(self.value) def __str__(self): return self.__repr__() def __decode_multibyte_int(b, signed=True): if len(b) == 1: fmt = ">B" # Always unsigned? elif len(b) == 2: fmt = ">h" elif len(b) == 3: if signed: return ((b[0] << 16) | struct.unpack(">H", b[1:])[0]) - ((b[0] >> 7) * 2 * 0x800000) else: return (b[0] << 16) | struct.unpack(">H", b[1:])[0] elif len(b) == 4: fmt = ">i" elif len(b) == 8: fmt = ">q" else: raise BplistError("Cannot decode multibyte int of length {0}".format(len(b))) if signed and len(b) > 1: return struct.unpack(fmt.lower(), b)[0] else: return struct.unpack(fmt.upper(), b)[0] def __decode_float(b, signed=True): if len(b) == 4: fmt = ">f" elif len(b) == 8: fmt = ">d" else: raise BplistError("Cannot decode float of length {0}".format(len(b))) if signed: return struct.unpack(fmt.lower(), b)[0] else: return struct.unpack(fmt.upper(), b)[0] def __decode_object(f, offset, collection_offset_size, offset_table): # Move to offset and read type #print("Decoding object at offset {0}".format(offset)) f.seek(offset) # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: type_byte = ord(f.read(1)[0]) else: type_byte = f.read(1)[0] #print("Type byte: {0}".format(hex(type_byte))) if type_byte == 0x00: # Null 0000 0000 return None elif type_byte == 0x08: # False 0000 1000 return False elif type_byte == 0x09: # True 0000 1001 return True elif type_byte == 0x0F: # Fill 0000 1111 raise BplistError("Fill type not currently supported at offset {0}".format(f.tell())) # Not sure what to return really... 
elif type_byte & 0xF0 == 0x10: # Int 0001 xxxx int_length = 2 ** (type_byte & 0x0F) int_bytes = f.read(int_length) return __decode_multibyte_int(int_bytes) elif type_byte & 0xF0 == 0x20: # Float 0010 nnnn float_length = 2 ** (type_byte & 0x0F) float_bytes = f.read(float_length) return __decode_float(float_bytes) elif type_byte & 0xFF == 0x33: # Date 0011 0011 date_bytes = f.read(8) date_value = __decode_float(date_bytes) return datetime.datetime(2001,1,1) + datetime.timedelta(seconds = date_value) elif type_byte & 0xF0 == 0x40: # Data 0100 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb data_length = type_byte & 0x0F else: # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] if int_type_byte & 0xF0 != 0x10: raise BplistError("Long Data field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) data_length = __decode_multibyte_int(int_bytes, False) return f.read(data_length) elif type_byte & 0xF0 == 0x50: # ASCII 0101 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb ascii_length = type_byte & 0x0F else: # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] if int_type_byte & 0xF0 != 0x10: raise BplistError("Long ASCII field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) ascii_length = __decode_multibyte_int(int_bytes, False) return f.read(ascii_length).decode("ascii") elif type_byte & 0xF0 == 0x60: # UTF-16 0110 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb utf16_length = (type_byte & 0x0F) * 2 # Length is characters - 16bit width else: # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] if int_type_byte & 0xF0 != 0x10: raise BplistError("Long UTF-16 field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) utf16_length = __decode_multibyte_int(int_bytes, False) * 2 return f.read(utf16_length).decode("utf_16_be") elif type_byte & 0xF0 == 0x80: # UID 1000 nnnn uid_length = (type_byte & 0x0F) + 1 uid_bytes = f.read(uid_length) return BplistUID(__decode_multibyte_int(uid_bytes, signed=False)) elif type_byte & 0xF0 == 0xA0: # Array 1010 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb array_count = type_byte & 0x0F else: # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] if int_type_byte & 0xF0 != 0x10: raise BplistError("Long Array field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) array_count = __decode_multibyte_int(int_bytes, signed=False) array_refs = [] for i in range(array_count): array_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False)) return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in array_refs] elif type_byte & 0xF0 == 0xC0: # Set 1010 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb set_count = type_byte & 0x0F else: # A little hack to keep the script portable between py2.x and py3k if 
sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] if int_type_byte & 0xF0 != 0x10: raise BplistError("Long Set field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) set_count = __decode_multibyte_int(int_bytes, signed=False) set_refs = [] for i in range(set_count): set_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False)) return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in set_refs] elif type_byte & 0xF0 == 0xD0: # Dict 1011 nnnn if type_byte & 0x0F != 0x0F: # length in 4 lsb dict_count = type_byte & 0x0F else: # A little hack to keep the script portable between py2.x and py3k if sys.version_info[0] < 3: int_type_byte = ord(f.read(1)[0]) else: int_type_byte = f.read(1)[0] #print("Dictionary length int byte: {0}".format(hex(int_type_byte))) if int_type_byte & 0xF0 != 0x10: raise BplistError("Long Dict field definition not followed by int type at offset {0}".format(f.tell())) int_length = 2 ** (int_type_byte & 0x0F) int_bytes = f.read(int_length) dict_count = __decode_multibyte_int(int_bytes, signed=False) key_refs = [] #print("Dictionary count: {0}".format(dict_count)) for i in range(dict_count): key_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False)) value_refs = [] for i in range(dict_count): value_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False)) dict_result = {} for i in range(dict_count): #print("Key ref: {0}\tVal ref: {1}".format(key_refs[i], value_refs[i])) key = __decode_object(f, offset_table[key_refs[i]], collection_offset_size, offset_table) val = __decode_object(f, offset_table[value_refs[i]], collection_offset_size, offset_table) dict_result[key] = val return dict_result def load(f): """ Reads and converts a file-like object containing a binary property list. 
Takes a file-like object (must support reading and seeking) as an argument Returns a data structure representing the data in the property list """ # Check magic number if f.read(8) != b"bplist00": raise BplistError("Bad file header") # Read trailer f.seek(-32, os.SEEK_END) trailer = f.read(32) offset_int_size, collection_offset_size, object_count, top_level_object_index, offest_table_offset = struct.unpack(">6xbbQQQ", trailer) # Read offset table f.seek(offest_table_offset) offset_table = [] for i in range(object_count): offset_table.append(__decode_multibyte_int(f.read(offset_int_size), False)) return __decode_object(f, offset_table[top_level_object_index], collection_offset_size, offset_table) def NSKeyedArchiver_convert(o, object_table): if isinstance(o, list): return NsKeyedArchiverList(o, object_table) elif isinstance(o, dict): return NsKeyedArchiverDictionary(o, object_table) elif isinstance(o, BplistUID): return NSKeyedArchiver_convert(object_table[o.value], object_table) else: return o <|fim▁hole|> def __init__(self, original_dict, object_table): super(NsKeyedArchiverDictionary, self).__init__(original_dict) self.object_table = object_table def __getitem__(self, index): o = super(NsKeyedArchiverDictionary, self).__getitem__(index) return NSKeyedArchiver_convert(o, self.object_table) class NsKeyedArchiverList(list): def __init__(self, original_iterable, object_table): super(NsKeyedArchiverList, self).__init__(original_iterable) self.object_table = object_table def __getitem__(self, index): o = super(NsKeyedArchiverList, self).__getitem__(index) return NSKeyedArchiver_convert(o, self.object_table) def __iter__(self): for o in super(NsKeyedArchiverList, self).__iter__(): yield NSKeyedArchiver_convert(o, self.object_table) def deserialise_NsKeyedArchiver(obj): """Deserialises an NSKeyedArchiver bplist rebuilding the structure. obj should usually be the top-level object returned by the load() function.""" # Check that this is an archiver and version we understand if not isinstance(obj, dict): raise TypeError("obj must be a dict") if "$archiver" not in obj or obj["$archiver"] != "NSKeyedArchiver": raise ValueError("obj does not contain an '$archiver' key or the '$archiver' is unrecognised") if "$version" not in obj or obj["$version"] != 100000: raise ValueError("obj does not contain a '$version' key or the '$version' is unrecognised") object_table = obj["$objects"] if "root" in obj["$top"]: return NSKeyedArchiver_convert(obj["$top"]["root"], object_table) else: return NSKeyedArchiver_convert(obj["$top"], object_table) # NSMutableDictionary convenience functions def is_nsmutabledictionary(obj): if not isinstance(obj, dict): #print("not dict") return False if "$class" not in obj.keys(): #print("no class") return False if obj["$class"].get("$classname") != "NSMutableDictionary": #print("wrong class") return False if "NS.keys" not in obj.keys(): #print("no keys") return False if "NS.objects" not in obj.keys(): #print("no objects") return False return True def convert_NSMutableDictionary(obj): """Converts a NSKeyedArchiver serialised NSMutableDictionary into a straight dictionary (rather than two lists as it is serialised as)""" # The dictionary is serialised as two lists (one for keys and one # for values) which obviously removes all convenience afforded by # dictionaries. This function converts this structure to an # actual dictionary so that values can be accessed by key. 
if not is_nsmutabledictionary(obj): raise ValueError("obj does not have the correct structure for a NSMutableDictionary serialised to a NSKeyedArchiver") keys = obj["NS.keys"] vals = obj["NS.objects"] # sense check the keys and values: if not isinstance(keys, list): raise TypeError("The 'NS.keys' value is an unexpected type (expected list; actual: {0}".format(type(keys))) if not isinstance(vals, list): raise TypeError("The 'NS.objects' value is an unexpected type (expected list; actual: {0}".format(type(vals))) if len(keys) != len(vals): raise ValueError("The length of the 'NS.keys' list ({0}) is not equal to that of the 'NS.objects ({1})".format(len(keys), len(vals))) result = {} for i,k in enumerate(keys): if k in result: raise ValueError("The 'NS.keys' list contains duplicate entries") result[k] = vals[i] return result<|fim▁end|>
class NsKeyedArchiverDictionary(dict):
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import codecs import os from setuptools import setup, find_packages def read(filename): filepath = os.path.join(os.path.dirname(__file__), filename) return codecs.open(filepath, encoding='utf-8').read() setup( name='lemon-filebrowser', version='0.1.2', license='ISC', description="Fork of Patrick Kranzlmueller's django-filebrowser app.", url='https://github.com/trilan/lemon-filebrowser',<|fim▁hole|> author_email='[email protected]', packages=find_packages(exclude=['tests', 'tests.*']), include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: ISC License (ISCL)', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', ], )<|fim▁end|>
author='Trilan Team',
<|file_name|>page.py<|end_file_name|><|fim▁begin|>from google.appengine.ext.webapp import template from models.user import User from models.page import Page import webapp2 import json class PageHandler(webapp2.RequestHandler): def get(self, page_id): template_params = {} user = None if self.request.cookies.get('session'): user = User.checkToken(self.request.cookies.get('session')) if not user: self.redirect('/') page = Page.getPageUser(user, page_id) if page: html = template.render("web/templates/page.html", template_params)<|fim▁hole|>app = webapp2.WSGIApplication([ ('/pages/(.*)', PageHandler), ], debug=True)<|fim▁end|>
self.response.write(html)
<|file_name|>test_costs.py<|end_file_name|><|fim▁begin|># Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- ''' Test of the cost functions ''' import numpy as np from neon import NervanaObject from neon.transforms import (CrossEntropyBinary, CrossEntropyMulti, SumSquared, Misclassification) def compare_tensors(func, y, t, outputs, deriv=False, tol=0.): be = NervanaObject.be temp = be.empty(outputs.shape) dtypeu = np.float32 if deriv is True: temp[:] = func.bprop(be.array(dtypeu(y)), be.array(dtypeu(t))) else: # try: temp[:] = func(be.array(dtypeu(y)), be.array(dtypeu(t))) # except: # import ipdb; ipdb.set_trace() cond = np.sum(np.abs(temp.get() - outputs) <= tol) assert cond == np.prod(outputs.shape) """ CrossEntropyBinary """ def test_cross_entropy_binary(backend_default): outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1)) targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1)) eps = 2 ** -23 expected_result = np.sum((-targets * np.log(outputs + eps)) - (1 - targets) * np.log(1 - outputs + eps), keepdims=True) compare_tensors(CrossEntropyBinary(), outputs, targets, expected_result, tol=1e-6) def test_cross_entropy_binary_limits(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) eps = 2 ** -23 expected_result = np.sum((-targets * np.log(outputs + eps)) - (1 - targets) * np.log(1 - outputs + eps), keepdims=True) compare_tensors(CrossEntropyBinary(), outputs, targets, expected_result, tol=1e-5) def test_cross_entropy_binary_derivative(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) # bprop assumes shortcut expected_result = ((outputs - targets) / outputs.shape[1]) compare_tensors( CrossEntropyBinary(), outputs, targets, expected_result, deriv=True, tol=1e-6) """ CrossEntropyMulti """ def test_cross_entropy_multi(backend_default): outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1)) targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1)) eps = 2 ** -23 expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)), axis=0, keepdims=True) compare_tensors(CrossEntropyMulti(), outputs, targets, expected_result, tol=1e-6) def test_cross_entropy_multi_limits(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) eps = 2 ** -23 expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)), axis=0, keepdims=True) compare_tensors(CrossEntropyMulti(), outputs, targets, expected_result, tol=1e-5) def test_cross_entropy_multi_derivative(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) expected_result = ((outputs - targets) / outputs.shape[1]) compare_tensors(CrossEntropyMulti(), outputs, targets, expected_result, deriv=True, 
tol=1e-6) """ SumSquared """ def test_sum_squared(backend_default): outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1)) targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1)) expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2. compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-8) def test_sum_squared_limits(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2. compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-7) def test_sum_squared_derivative(backend_default): outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1)) targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1)) expected_result = (outputs - targets) / outputs.shape[1] compare_tensors(SumSquared(), outputs, targets, expected_result, deriv=True, tol=1e-8) """ Misclassification """ def compare_metric(func, y, t, outputs, deriv=False, tol=0.): be = NervanaObject.be dtypeu = np.float32 temp = func(be.array(dtypeu(y)), be.array(dtypeu(t))) cond = np.sum(np.abs(temp - outputs) <= tol) assert cond == np.prod(outputs.shape) def test_misclassification(backend_default): NervanaObject.be.bsz = 3 outputs = np.array( [[0.25, 0.99, 0.33], [0.5, 0.005, 0.32], [0.25, 0.005, 0.34]]) targets = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0]])<|fim▁hole|> outputs, targets, expected_result, tol=1e-7)<|fim▁end|>
expected_result = np.ones((1, 1)) / 3. compare_metric(Misclassification(),
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import json<|fim▁hole|>import numpy as np import pandas as pd import numbers class NumpyAndPandasEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, (np.ndarray, np.matrix)): return [self.default(x) for x in obj] if isinstance(obj, pd.DataFrame): return [self.default(series) for _, series in obj.iterrows()] if isinstance(obj, pd.Series): return [self.default(val) for _, val in obj.iteritems()] if isinstance(obj, numbers.Integral): return int(obj) if isinstance(obj, (numbers.Real, numbers.Rational)): return float(obj) return super().default(obj)<|fim▁end|>
<|file_name|>DougLea.java<|end_file_name|><|fim▁begin|>/* Sample code file for CPJ2e. All code has been pasted directly from the camera-ready copy, and then modified in the smallest possible way to ensure that it will compile -- adding import statements or full package qualifiers for some class names, adding stand-ins for classes and methods that are referred to but not listed, and supplying dummy arguments instead of "..."). They are presented in page-number order. */ import com.sun.corba.se.impl.orbutil.concurrent.CondVar; import com.sun.corba.se.impl.orbutil.concurrent.Sync; import java.applet.Applet; import java.awt.Canvas; import java.awt.Color; import java.awt.Dimension; import java.awt.Graphics; import java.awt.Point; import java.beans.PropertyVetoException; import java.beans.VetoableChangeListener; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.lang.reflect.InvocationTargetException; import java.net.ServerSocket; import java.net.Socket; import java.net.URL; import java.util.ArrayList; import java.util.ConcurrentModificationException; import java.util.Date; import java.util.HashSet; import java.util.Hashtable; import java.util.Iterator; import java.util.LinkedList; import java.util.NoSuchElementException; import java.util.Random; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.locks.ReadWriteLock; class Helper { // Dummy standin for referenced generic "Helper" classes void handle() {} void operation() {} } class Particle { protected int x; protected int y; protected final Random rng = new Random(); public Particle(int initialX, int initialY) { x = initialX; y = initialY; } public synchronized void move() { x += rng.nextInt(10) - 5; y += rng.nextInt(20) - 10; } public void draw(Graphics g) { int lx, ly; synchronized (this) { lx = x; ly = y; } g.drawRect(lx, ly, 10, 10); } } class ParticleCanvas extends Canvas { private Particle[] particles = new Particle[0]; ParticleCanvas(int size) { setSize(new Dimension(size, size)); } // Intended to be called by applet synchronized void setParticles(Particle[] ps) { if (ps == null) throw new IllegalArgumentException("Cannot set null"); particles = ps; } protected synchronized Particle[] getParticles() { return particles; } public void paint(Graphics g) { // override Canvas.paint Particle[] ps = getParticles(); for (int i = 0; i < ps.length; ++i) ps[i].draw(g); } } class ParticleApplet extends Applet { protected Thread[] threads; // null when not running protected final ParticleCanvas canvas = new ParticleCanvas(100); public void init() { add(canvas); } protected Thread makeThread(final Particle p) { // utility Runnable runloop = new Runnable() { public void run() { try { for(;;) { p.move(); canvas.repaint(); Thread.sleep(100); // 100msec is arbitrary } } catch (InterruptedException e) { return; } } }; return new Thread(runloop); } public synchronized void start() { int n = 10; // just for demo if (threads == null) { // bypass if already started Particle[] particles = new Particle[n]; for (int i = 0; i < n; ++i) particles[i] = new Particle(50, 50); canvas.setParticles(particles); threads = new Thread[n]; for (int i = 0; i < n; ++i) { threads[i] = makeThread(particles[i]); threads[i].start(); } } } public synchronized void stop() { if (threads != null) { // bypass if already stopped for (int i = 0; i < 
threads.length; ++i) threads[i].interrupt(); threads = null; } } } class AssertionError extends java.lang.Error { public AssertionError() { super(); } public AssertionError(String message) { super(message); } } interface Tank { float getCapacity(); float getVolume(); void transferWater(float amount) throws OverflowException, UnderflowException; } class OverflowException extends Exception {} class UnderflowException extends Exception {} class TankImpl { public float getCapacity() { return 1.0f; } public float getVolume() { return 1.0f; } public void transferWater(float amount) throws OverflowException, UnderflowException {} } class Performer { public void perform() {} } class AdaptedPerformer implements Runnable { private final Performer adaptee; public AdaptedPerformer(Performer p) { adaptee = p; } public void run() { adaptee.perform(); } } class AdaptedTank implements Tank { protected final Tank delegate; public AdaptedTank(Tank t) { delegate = t; } public float getCapacity() { return delegate.getCapacity(); } public float getVolume() { return delegate.getVolume(); } protected void checkVolumeInvariant() throws AssertionError { float v = getVolume(); float c = getCapacity(); if ( !(v >= 0.0 && v <= c) ) throw new AssertionError(); } public synchronized void transferWater(float amount) throws OverflowException, UnderflowException { checkVolumeInvariant(); // before-check try { delegate.transferWater(amount); } // postpone rethrows until after-check catch (OverflowException ex) { throw ex; } catch (UnderflowException ex) { throw ex; } finally { checkVolumeInvariant(); // after-check } } } abstract class AbstractTank implements Tank { protected void checkVolumeInvariant() throws AssertionError { // ... identical to AdaptedTank version ... } protected abstract void doTransferWater(float amount) throws OverflowException, UnderflowException; public synchronized void transferWater(float amount) throws OverflowException, UnderflowException { // identical to AdaptedTank version except for inner call: // ... try { doTransferWater(amount); } finally {} // ... } } class ConcreteTank extends AbstractTank { protected final float capacity = 10.f; protected float volume; // ... public float getVolume() { return volume; } public float getCapacity() { return capacity; } protected void doTransferWater(float amount) throws OverflowException, UnderflowException { // ... implementation code ... } } interface TankOp { void op() throws OverflowException, UnderflowException; } class TankWithMethodAdapter { // ... protected void checkVolumeInvariant() throws AssertionError { // ... identical to AdaptedTank version ... } protected void runWithinBeforeAfterChecks(TankOp cmd) throws OverflowException, UnderflowException { // identical to AdaptedTank.transferWater // except for inner call: // ... try { cmd.op(); } finally {} // ... } protected void doTransferWater(float amount) throws OverflowException, UnderflowException { // ... implementation code ... 
} public synchronized void transferWater(final float amount) throws OverflowException, UnderflowException { runWithinBeforeAfterChecks(new TankOp() { public void op() throws OverflowException, UnderflowException { doTransferWater(amount); } }); } } class StatelessAdder { public int add(int a, int b) { return a + b; } } class ImmutableAdder { private final int offset; public ImmutableAdder(int a) { offset = a; } public int addOffset(int b) { return offset + b; } } class Fraction { // Fragments protected final long numerator; protected final long denominator; public Fraction(long num, long den) { // normalize: boolean sameSign = (num >= 0) == (den >= 0); long n = (num >= 0)? num : -num; long d = (den >= 0)? den : -den; long g = gcd(n, d); numerator = (sameSign)? n / g : -n / g; denominator = d / g; } static long gcd(long a, long b) { // ... compute greatest common divisor ... return 1; } public Fraction plus(Fraction f) { return new Fraction(numerator * f.denominator + f.numerator * denominator, denominator * f.denominator); } public boolean equals(Object other) { // override default if (! (other instanceof Fraction) ) return false; Fraction f = (Fraction)(other); return numerator * f.denominator == denominator * f.numerator; } public int hashCode() { // override default return (int) (numerator ^ denominator); } } class Server { void doIt() {} } class Relay { protected final Server server; Relay(Server s) { server = s; } void doIt() { server.doIt(); } } class Even { // Do not use private int n = 0; public int next(){ // POST?: next is always even ++n; ++n; return n; } } class ExpandableArray { protected Object[] data; // the elements protected int size = 0; // the number of array slots used // INV: 0 <= size <= data.length public ExpandableArray(int cap) { data = new Object[cap]; } public synchronized int size() { return size; } public synchronized Object get(int i) // subscripted access throws NoSuchElementException { if (i < 0 || i >= size ) throw new NoSuchElementException(); return data[i]; } public synchronized void add(Object x) { // add at end if (size == data.length) { // need a bigger array Object[] olddata = data; data = new Object[3 * (size + 1) / 2]; System.arraycopy(olddata, 0, data, 0, olddata.length); } data[size++] = x; } public synchronized void removeLast() throws NoSuchElementException { if (size == 0) throw new NoSuchElementException(); data[--size] = null; } } interface Procedure { void apply(Object obj); } class ExpandableArrayWithApply extends ExpandableArray { public ExpandableArrayWithApply(int cap) { super(cap); } synchronized void applyToAll(Procedure p) { for (int i = 0; i < size; ++i) p.apply(data[i]); } } class ExpandableArrayWithIterator extends ExpandableArray { protected int version = 0; public ExpandableArrayWithIterator(int cap) { super(cap); } public synchronized void removeLast() throws NoSuchElementException { super.removeLast(); ++version; // advertise update } public synchronized void add(Object x) { super.add(x); ++version; } public synchronized Iterator iterator() { return new EAIterator(); } protected class EAIterator implements Iterator { protected final int currentVersion; protected int currentIndex = 0; EAIterator() { currentVersion = version; } public Object next() { synchronized(ExpandableArrayWithIterator.this) { if (currentVersion != version) throw new ConcurrentModificationException(); else if (currentIndex == size) throw new NoSuchElementException(); else return data[currentIndex++]; } } public boolean hasNext() { 
synchronized(ExpandableArrayWithIterator.this) { return (currentIndex < size); } } public void remove() { // similar } } } class LazySingletonCounter { private final long initial; private long count; private LazySingletonCounter() { initial = Math.abs(new java.util.Random().nextLong() / 2); count = initial; } private static LazySingletonCounter s = null; private static final Object classLock = LazySingletonCounter.class; public static LazySingletonCounter instance() { synchronized(classLock) { if (s == null) s = new LazySingletonCounter(); return s; } } public long next() { synchronized(classLock) { return count++; } } public void reset() { synchronized(classLock) { count = initial; } } } class EagerSingletonCounter { private final long initial; private long count; private EagerSingletonCounter() { initial = Math.abs(new java.util.Random().nextLong() / 2); count = initial; } private static final EagerSingletonCounter s = new EagerSingletonCounter(); public static EagerSingletonCounter instance() { return s; } public synchronized long next() { return count++; } public synchronized void reset() { count = initial; } } class StaticCounter { private static final long initial = Math.abs(new java.util.Random().nextLong() / 2); private static long count = initial; private StaticCounter() { } // disable instance construction public static synchronized long next() { return count++; } public static synchronized void reset() { count = initial; } } class Cell { // Do not use private long value; synchronized long getValue() { return value; } synchronized void setValue(long v) { value = v; } synchronized void swapValue(Cell other) { long t = getValue(); long v = other.getValue(); setValue(v); other.setValue(t); } } class Cell2 { // Do not use private long value; synchronized long getValue() { return value; } synchronized void setValue(long v) { value = v; } public void swapValue(Cell2 other) { if (other == this) // alias check return; else if (System.identityHashCode(this) < System.identityHashCode(other)) this.doSwapValue(other); else other.doSwapValue(this); } protected synchronized void doSwapValue(Cell2 other) { // same as original public version: long t = getValue(); long v = other.getValue(); setValue(v); other.setValue(t); } protected synchronized void doSwapValueV2(Cell2 other) { synchronized(other) { long t = value; value = other.value; other.value = t; } } } final class SetCheck { private int a = 0; private long b = 0; void set() { a = 1; b = -1; } boolean check() { return ((b == 0) || (b == -1 && a == 1)); } } final class VFloat { private float value; final synchronized void set(float f) { value = f; } final synchronized float get() { return value; } } class Plotter { // fragments // ... public void showNextPoint() { Point p = new Point(); p.x = computeX(); p.y = computeY(); display(p); } int computeX() { return 1; } int computeY() { return 1; } protected void display(Point p) { // somehow arrange to show p. } } class SessionBasedService { // Fragments // ... public void service() { OutputStream output = null; try { output = new FileOutputStream("..."); doService(output); } catch (IOException e) { handleIOFailure(); } finally { try { if (output != null) output.close(); } catch (IOException ignore) {} // ignore exception in close } } void handleIOFailure() {} void doService(OutputStream s) throws IOException { s.write(0); // ... possibly more handoffs ... } } class ThreadPerSessionBasedService { // fragments // ... 
public void service() { Runnable r = new Runnable() { public void run() { OutputStream output = null; try { output = new FileOutputStream("..."); doService(output); } catch (IOException e) { handleIOFailure(); } finally { try { if (output != null) output.close(); } catch (IOException ignore) {} } } }; new Thread(r).start(); } void handleIOFailure() {} void doService(OutputStream s) throws IOException { s.write(0); // ... possibly more hand-offs ... } } class ThreadWithOutputStream extends Thread { private OutputStream output; ThreadWithOutputStream(Runnable r, OutputStream s) { super(r); output = s; } static ThreadWithOutputStream current() throws ClassCastException { return (ThreadWithOutputStream) (currentThread()); } static OutputStream getOutput() { return current().output; } static void setOutput(OutputStream s) { current().output = s;} } class ServiceUsingThreadWithOutputStream { // Fragments // ... public void service() throws IOException { OutputStream output = new FileOutputStream("..."); Runnable r = new Runnable() { public void run() { try { doService(); } catch (IOException e) { } } }; new ThreadWithOutputStream(r, output).start(); } void doService() throws IOException { ThreadWithOutputStream.current().getOutput().write(0); } } class ServiceUsingThreadLocal { // Fragments static ThreadLocal output = new ThreadLocal(); public void service() { try { final OutputStream s = new FileOutputStream("..."); Runnable r = new Runnable() { public void run() { output.set(s); try { doService(); } catch (IOException e) { } finally { try { s.close(); } catch (IOException ignore) {} } } }; new Thread(r).start(); } catch (IOException e) {} } void doService() throws IOException { ((OutputStream)(output.get())).write(0); // ... } } class BarePoint { public double x; public double y; } class SynchedPoint { protected final BarePoint delegate = new BarePoint(); public synchronized double getX() { return delegate.x;} public synchronized double getY() { return delegate.y; } public synchronized void setX(double v) { delegate.x = v; } public synchronized void setY(double v) { delegate.y = v; } } class Address { // Fragments protected String street; protected String city; public String getStreet() { return street; } public void setStreet(String s) { street = s; } // ... public void printLabel(OutputStream s) { } } class SynchronizedAddress extends Address { // ... public synchronized String getStreet() { return super.getStreet(); } public synchronized void setStreet(String s) { super.setStreet(s); } public synchronized void printLabel(OutputStream s) { super.printLabel(s); } } class Printer { public void printDocument(byte[] doc) { /* ... */ } // ... 
} class PrintService { protected PrintService neighbor = null; // node to take from protected Printer printer = null; public synchronized void print(byte[] doc) { getPrinter().printDocument(doc); } protected Printer getPrinter() { // PRE: synch lock held if (printer == null) // need to take from neighbor printer = neighbor.takePrinter(); return printer; } synchronized Printer takePrinter() { // called from others if (printer != null) { Printer p = printer; // implement take protocol printer = null; return p; } else return neighbor.takePrinter(); // propagate } // initialization methods called only during start-up synchronized void setNeighbor(PrintService n) { neighbor = n; } synchronized void givePrinter(Printer p) { printer = p; } // Sample code to initialize a ring of new services public static void startUpServices(int nServices, Printer p) throws IllegalArgumentException { if (nServices <= 0 || p == null) throw new IllegalArgumentException(); PrintService first = new PrintService(); PrintService pred = first; for (int i = 1; i < nServices; ++i) { PrintService s = new PrintService(); s.setNeighbor(pred); pred = s; } first.setNeighbor(pred); first.givePrinter(p); } } class AnimationApplet extends Applet { // Fragments // ... int framesPerSecond; // default zero is illegal value void animate() { try { if (framesPerSecond == 0) { // the unsynchronized check synchronized(this) { if (framesPerSecond == 0) { // the double-check String param = getParameter("fps"); framesPerSecond = Integer.parseInt(param); } } } } catch (Exception e) {} // ... actions using framesPerSecond ... } } class ServerWithStateUpdate { private double state; private final Helper helper = new Helper(); public synchronized void service() { state = 2.0f; // ...; // set to some new value helper.operation(); } public synchronized double getState() { return state; } } class ServerWithOpenCall { private double state; private final Helper helper = new Helper(); private synchronized void updateState() { state = 2.0f; // ...; // set to some new value } public void service() { updateState(); helper.operation(); } public synchronized double getState() { return state; } } class ServerWithAssignableHelper { private double state; private Helper helper = new Helper(); synchronized void setHelper(Helper h) { helper = h; } public void service() { Helper h; synchronized(this) { state = 2.0f; // ... h = helper; } h.operation(); } public synchronized void synchedService() { // see below service(); } } class LinkedCell { protected int value; protected final LinkedCell next; public LinkedCell(int v, LinkedCell t) { value = v; next = t; } public synchronized int value() { return value; } public synchronized void setValue(int v) { value = v; } public int sum() { // add up all element values return (next == null) ? value() : value() + next.sum(); } public boolean includes(int x) { // search for x return (value() == x) ? true: (next == null)? false : next.includes(x); } } class Shape { // Incomplete protected double x = 0.0; protected double y = 0.0; protected double width = 0.0; protected double height = 0.0; public synchronized double x() { return x;} public synchronized double y() { return y; } public synchronized double width() { return width;} public synchronized double height() { return height; } public synchronized void adjustLocation() { x = 1; // longCalculation1(); y = 2; //longCalculation2(); } public synchronized void adjustDimensions() { width = 3; // longCalculation3(); height = 4; // longCalculation4(); } // ... 
} class PassThroughShape { protected final AdjustableLoc loc = new AdjustableLoc(0, 0); protected final AdjustableDim dim = new AdjustableDim(0, 0); public double x() { return loc.x(); } public double y() { return loc.y(); } public double width() { return dim.width(); } public double height() { return dim.height(); } public void adjustLocation() { loc.adjust(); } public void adjustDimensions() { dim.adjust(); } } class AdjustableLoc { protected double x; protected double y; public AdjustableLoc(double initX, double initY) { x = initX; y = initY; } public synchronized double x() { return x;} public synchronized double y() { return y; } public synchronized void adjust() { x = longCalculation1(); y = longCalculation2(); } protected double longCalculation1() { return 1; /* ... */ } protected double longCalculation2() { return 2; /* ... */ } } class AdjustableDim { protected double width; protected double height; public AdjustableDim(double initW, double initH) { width = initW; height = initH; } public synchronized double width() { return width;} public synchronized double height() { return height; } public synchronized void adjust() { width = longCalculation3(); height = longCalculation4(); } protected double longCalculation3() { return 3; /* ... */ } protected double longCalculation4() { return 4; /* ... */ } } class LockSplitShape { // Incomplete protected double x = 0.0; protected double y = 0.0; protected double width = 0.0; protected double height = 0.0; protected final Object locationLock = new Object(); protected final Object dimensionLock = new Object(); public double x() { synchronized(locationLock) { return x; } } public double y() { synchronized(locationLock) { return y; } } public void adjustLocation() { synchronized(locationLock) { x = 1; // longCalculation1(); y = 2; // longCalculation2(); } } // and so on } class SynchronizedInt { private int value; public SynchronizedInt(int v) { value = v; } public synchronized int get() { return value; } public synchronized int set(int v) { // returns previous value int oldValue = value; value = v; return oldValue; } public synchronized int increment() { return ++value; } // and so on } class Person { // Fragments // ... protected final SynchronizedInt age = new SynchronizedInt(0); protected final SynchronizedBoolean isMarried = new SynchronizedBoolean(false); protected final SynchronizedDouble income = new SynchronizedDouble(0.0); public int getAge() { return age.get(); } public void birthday() { age.increment(); } // ... 
} class LinkedQueue { protected Node head = new Node(null); protected Node last = head; protected final Object pollLock = new Object(); protected final Object putLock = new Object(); public void put(Object x) { Node node = new Node(x); synchronized (putLock) { // insert at end of list synchronized (last) { last.next = node; // extend list last = node; } } } public Object poll() { // returns null if empty synchronized (pollLock) { synchronized (head) { Object x = null; Node first = head.next; // get to first real node if (first != null) { x = first.object; first.object = null; // forget old object head = first; // first becomes new head } return x; } } } static class Node { // local node class for queue Object object; Node next = null; Node(Object x) { object = x; } } } class InsufficientFunds extends Exception {} interface Account { long balance(); } interface UpdatableAccount extends Account { void credit(long amount) throws InsufficientFunds; void debit(long amount) throws InsufficientFunds; } // Sample implementation of updatable version class UpdatableAccountImpl implements UpdatableAccount { private long currentBalance; public UpdatableAccountImpl(long initialBalance) { currentBalance = initialBalance; } public synchronized long balance() { return currentBalance; } public synchronized void credit(long amount) throws InsufficientFunds { if (amount >= 0 || currentBalance >= -amount) currentBalance += amount; else throw new InsufficientFunds(); } public synchronized void debit(long amount) throws InsufficientFunds { credit(-amount); } } final class ImmutableAccount implements Account { private Account delegate; public ImmutableAccount(long initialBalance) { delegate = new UpdatableAccountImpl(initialBalance); } ImmutableAccount(Account acct) { delegate = acct; } public long balance() { // forward the immutable method return delegate.balance(); } } class AccountRecorder { // A logging facility public void recordBalance(Account a) { System.out.println(a.balance()); // or record in file } } class AccountHolder { private UpdatableAccount acct = new UpdatableAccountImpl(0); private AccountRecorder recorder; public AccountHolder(AccountRecorder r) { recorder = r; } public synchronized void acceptMoney(long amount) { try { acct.credit(amount); recorder.recordBalance(new ImmutableAccount(acct));//(*) } catch (InsufficientFunds ex) { System.out.println("Cannot accept negative amount."); } } } class EvilAccountRecorder extends AccountRecorder { private long embezzlement; // ... 
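  // Note on the defense shown above: AccountHolder.acceptMoney hands the
  // recorder an ImmutableAccount wrapper at the line marked (*); the wrapper
  // does not implement UpdatableAccount, so the instanceof test in the
  // override below fails and the attempted debit never runs.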
public void recordBalance(Account a) { super.recordBalance(a); if (a instanceof UpdatableAccount) { UpdatableAccount u = (UpdatableAccount)a; try { u.debit(10); embezzlement += 10; } catch (InsufficientFunds quietlyignore) {} } } } class ImmutablePoint { private final int x; private final int y; public ImmutablePoint(int initX, int initY) { x = initX; y = initY; } public int x() { return x; } public int y() { return y; } } class Dot { protected ImmutablePoint loc; public Dot(int x, int y) { loc = new ImmutablePoint(x, y); } public synchronized ImmutablePoint location() { return loc; } protected synchronized void updateLoc(ImmutablePoint newLoc) { loc = newLoc; } public void moveTo(int x, int y) { updateLoc(new ImmutablePoint(x, y)); } public synchronized void shiftX(int delta) { updateLoc(new ImmutablePoint(loc.x() + delta, loc.y())); } } class CopyOnWriteArrayList { // Incomplete protected Object[] array = new Object[0]; protected synchronized Object[] getArray() { return array; } public synchronized void add(Object element) { int len = array.length; Object[] newArray = new Object[len+1]; System.arraycopy(array, 0, newArray, 0, len); newArray[len] = element; array = newArray; } public Iterator iterator() { return new Iterator() { protected final Object[] snapshot = getArray(); protected int cursor = 0; public boolean hasNext() { return cursor < snapshot.length; } public Object next() { try { return snapshot[cursor++]; } catch (IndexOutOfBoundsException ex) { throw new NoSuchElementException(); } } public void remove() {} }; } } class State {} class Optimistic { // Generic code sketch private State state; // reference to representation object private synchronized State getState() { return state; } private synchronized boolean commit(State assumed, State next) { if (state == assumed) { state = next; return true; } else return false; } } class OptimisticDot { protected ImmutablePoint loc; public OptimisticDot(int x, int y) { loc = new ImmutablePoint(x, y); } public synchronized ImmutablePoint location() { return loc; } protected synchronized boolean commit(ImmutablePoint assumed, ImmutablePoint next) { if (loc == assumed) { loc = next; return true; } else return false; } public synchronized void moveTo(int x, int y) { // bypass commit since unconditional loc = new ImmutablePoint(x, y); } public void shiftX(int delta) { boolean success = false; do { ImmutablePoint old = location(); ImmutablePoint next = new ImmutablePoint(old.x() + delta, old.y()); success = commit(old, next); } while (!success); } } class ParticleUsingMutex { protected int x; protected int y; protected final Random rng = new Random(); protected final Mutex mutex = new Mutex(); public ParticleUsingMutex(int initialX, int initialY) { x = initialX; y = initialY; } public void move() { try { mutex.acquire(); try { x += rng.nextInt(10) - 5; y += rng.nextInt(20) - 10; } finally { mutex.release(); } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } public void draw(Graphics g) { int lx, ly; try { mutex.acquire(); try { lx = x; ly = y; } finally { mutex.release(); } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); return; } g.drawRect(lx, ly, 10, 10); } } class WithMutex { private final Mutex mutex; public WithMutex(Mutex m) { mutex = m; } public void perform(Runnable r) throws InterruptedException { mutex.acquire(); try { r.run(); } finally { mutex.release(); } } } class ParticleUsingWrapper { // Incomplete protected int x; protected int y; protected final Random rng = new Random(); 
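  // Illustrative sketch of the wrapper idiom on its own (the demo method and
  // printed message are made up): WithMutex.perform factors out the
  // acquire/try/finally/release protocol, so callers such as move() below
  // supply only the ground action.
  static void demoPerform(WithMutex guard) throws InterruptedException {
    guard.perform(new Runnable() {
      public void run() { System.out.println("inside the guarded action"); }
    });
  }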
protected final WithMutex withMutex = new WithMutex(new Mutex()); protected void doMove() { x += rng.nextInt(10) - 5; y += rng.nextInt(20) - 10; } public void move() { try { withMutex.perform(new Runnable() { public void run() { doMove(); } }); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } // ... } class CellUsingBackoff { private long value; private final Mutex mutex = new Mutex(); void swapValue(CellUsingBackoff other) { if (this == other) return; // alias check required for (;;) { try { mutex.acquire(); try { if (other.mutex.attempt(0)) { try { long t = value; value = other.value; other.value = t; return; } finally { other.mutex.release(); } } } finally { mutex.release(); }; Thread.sleep(100); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); return; } } } } class CellUsingReorderedBackoff { private long value; private final Mutex mutex = new Mutex(); private static boolean trySwap(CellUsingReorderedBackoff a, CellUsingReorderedBackoff b) throws InterruptedException { boolean success = false; if (a.mutex.attempt(0)) { try { if (b.mutex.attempt(0)) { try { long t = a.value; a.value = b.value; b.value = t; success = true; } finally { b.mutex.release(); } } } finally { a.mutex.release(); } } return success; } void swapValue(CellUsingReorderedBackoff other) { if (this == other) return; // alias check required try { while (!trySwap(this, other) && !trySwap(other, this)) Thread.sleep(100); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } } } class ListUsingMutex { static class Node { Object item; Node next; Mutex lock = new Mutex(); // each node keeps its own lock Node(Object x, Node n) { item = x; next = n; } } protected Node head; // pointer to first node of list // Use plain synchronization to protect head field. // (We could instead use a Mutex here too but there is no // reason to do so.) protected synchronized Node getHead() { return head; } public synchronized void add(Object x) { // simple prepend // for simplicity here, do not allow null elements if (x == null) throw new IllegalArgumentException(); // The use of synchronized here protects only head field. // The method does not need to wait out other traversers // who have already made it past head node. head = new Node(x, head); } boolean search(Object x) throws InterruptedException { Node p = getHead(); if (p == null || x == null) return false; p.lock.acquire(); // Prime loop by acquiring first lock. // If above acquire fails due to interrupt, the method will // throw InterruptedException now, so there is no need for // further cleanup. for (;;) { Node nextp = null; boolean found; try { found = x.equals(p.item); if (!found) { nextp = p.next; if (nextp != null) { try { // Acquire next lock // while still holding current nextp.lock.acquire(); } catch (InterruptedException ie) { throw ie; // Note that finally clause will // execute before the throw } } } } finally { p.lock.release(); } if (found) return true; else if (nextp == null) return false; else p = nextp; } } // ... other similar traversal and update methods ... 
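  // Illustrative usage sketch (the element values are made up): add() holds
  // only the lock protecting the head field, while search() couples per-node
  // locks hand-over-hand as it walks the chain.
  public static void main(String[] args) throws InterruptedException {
    ListUsingMutex list = new ListUsingMutex();
    list.add("one");
    list.add("two");
    System.out.println(list.search("one"));   // true
    System.out.println(list.search("three")); // false
  }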
} class DataRepository { // code sketch protected final ReadWriteLock rw = new WriterPreferenceReadWriteLock(); public void access() throws InterruptedException { rw.readLock().acquire(); try { /* read data */ } finally { rw.readLock().release(); } } public void modify() throws InterruptedException { rw.writeLock().acquire(); try { /* write data */ } finally { rw.writeLock().release(); } } } class ClientUsingSocket { // Code sketch int portnumber = 1234; String server = "gee"; // ... Socket retryUntilConnected() throws InterruptedException { // first delay is randomly chosen between 5 and 10secs long delayTime = 5000 + (long)(Math.random() * 5000); for (;;) { try { return new Socket(server, portnumber); } catch (IOException ex) { Thread.sleep(delayTime); delayTime = delayTime * 3 / 2 + 1; // increase 50% } } } } class ServiceException extends Exception {} interface ServerWithException { void service() throws ServiceException; } interface ServiceExceptionHandler { void handle(ServiceException e); } class ServerImpl implements ServerWithException { public void service() throws ServiceException {} } class HandlerImpl implements ServiceExceptionHandler { public void handle(ServiceException e) {} } class HandledService implements ServerWithException { final ServerWithException server = new ServerImpl(); final ServiceExceptionHandler handler = new HandlerImpl(); public void service() { // no throw clause try { server.service(); } catch (ServiceException e) { handler.handle(e); } } } class ExceptionEvent extends java.util.EventObject { public final Throwable theException; public ExceptionEvent(Object src, Throwable ex) { super(src); theException = ex; } } class ExceptionEventListener { // Incomplete public void exceptionOccured(ExceptionEvent ee) { // ... respond to exception... } } class ServiceIssuingExceptionEvent { // Incomplete // ... private final CopyOnWriteArrayList handlers = new CopyOnWriteArrayList(); public void addHandler(ExceptionEventListener h) { handlers.add(h); } public void service() { // ... boolean failed = true; if (failed) { Throwable ex = new ServiceException(); ExceptionEvent ee = new ExceptionEvent(this, ex); for (Iterator it = handlers.iterator(); it.hasNext();) { ExceptionEventListener l = (ExceptionEventListener)(it.next()); l.exceptionOccured(ee); } } } } class CancellableReader { // Incomplete private Thread readerThread; // only one at a time supported private FileInputStream dataFile; public synchronized void startReaderThread() throws IllegalStateException, FileNotFoundException { if (readerThread != null) throw new IllegalStateException(); dataFile = new FileInputStream("data"); readerThread = new Thread(new Runnable() { public void run() { doRead(); } }); readerThread.start(); } protected synchronized void closeFile() { // utility method if (dataFile != null) { try { dataFile.close(); } catch (IOException ignore) {} dataFile = null; } } void process(int b) {} private void doRead() { try { while (!Thread.interrupted()) { try { int c = dataFile.read(); if (c == -1) break; else process(c); } catch (IOException ex) { break; // perhaps first do other cleanup } } } finally { closeFile(); synchronized(this) { readerThread = null; } } } public synchronized void cancelReaderThread() { if (readerThread != null) readerThread.interrupt(); closeFile(); } } class ReaderWithTimeout { // Generic code sketch // ... 
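  // Note on the sketch below: the time-out branch is elided, so as written a
  // stream that never reports available input keeps the loop sleeping in
  // 100-millisecond steps; the "... fail ..." action is assumed to return or
  // throw once the deadline passes.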
void process(int b) {} void attemptRead(InputStream stream, long timeout) throws Exception { long startTime = System.currentTimeMillis(); try { for (;;) { if (stream.available() > 0) { int c = stream.read(); if (c != -1) process(c); else break; // eof } else { try { Thread.sleep(100); // arbitrary back-off time } catch (InterruptedException ie) { /* ... quietly wrap up and return ... */ } long now = System.currentTimeMillis(); if (now - startTime >= timeout) { /* ... fail ...*/ } } } } catch (IOException ex) { /* ... fail ... */ } } } class C { // Fragments private int v; // invariant: v >= 0 synchronized void f() { v = -1 ; // temporarily set to illegal value as flag compute(); // possible stop point (*) v = 1; // set to legal value } synchronized void g() { while (v != 0) { --v; something(); } } void compute() {} void something() {} } class Terminator { // Try to kill; return true if known to be dead static boolean terminate(Thread t, long maxWaitToDie) { if (!t.isAlive()) return true; // already dead // phase 1 -- graceful cancellation t.interrupt(); try { t.join(maxWaitToDie); } catch(InterruptedException e){} // ignore if (!t.isAlive()) return true; // success // phase 2 -- trap all security checks // theSecurityMgr.denyAllChecksFor(t); // a made-up method try { t.join(maxWaitToDie); } catch(InterruptedException ex) {} if (!t.isAlive()) return true; // phase 3 -- minimize damage t.setPriority(Thread.MIN_PRIORITY); return false; } } interface BoundedCounter { static final long MIN = 0; // minimum allowed value static final long MAX = 10; // maximum allowed value long count(); // INV: MIN <= count() <= MAX // INIT: count() == MIN void inc(); // only allowed when count() < MAX void dec(); // only allowed when count() > MIN } class X { synchronized void w() throws InterruptedException { before(); wait(); after(); } synchronized void n() { notifyAll(); } void before() {} void after() {} } class GuardedClass { // Generic code sketch protected boolean cond = false; // PRE: lock held protected void awaitCond() throws InterruptedException{ while (!cond) wait(); } public synchronized void guardedAction() { try { awaitCond(); } catch (InterruptedException ie) { // fail } // actions } } class SimpleBoundedCounter { static final long MIN = 0; // minimum allowed value static final long MAX = 10; // maximum allowed value protected long count = MIN; public synchronized long count() { return count; } public synchronized void inc() throws InterruptedException { awaitUnderMax(); setCount(count + 1); } public synchronized void dec() throws InterruptedException { awaitOverMin(); setCount(count - 1); } protected void setCount(long newValue) { // PRE: lock held count = newValue; notifyAll(); // wake up any thread depending on new value } protected void awaitUnderMax() throws InterruptedException { while (count == MAX) wait(); } protected void awaitOverMin() throws InterruptedException { while (count == MIN) wait(); } } class GuardedClassUsingNotify { protected boolean cond = false; protected int nWaiting = 0; // count waiting threads protected synchronized void awaitCond() throws InterruptedException { while (!cond) { ++nWaiting; // record fact that a thread is waiting try { wait(); } catch (InterruptedException ie) { notify(); throw ie; } finally { --nWaiting; // no longer waiting } } } protected synchronized void signalCond() { if (cond) { // simulate notifyAll for (int i = nWaiting; i > 0; --i) { notify(); } } } } class GamePlayer implements Runnable { // Incomplete protected GamePlayer other; protected 
boolean myturn = false; protected synchronized void setOther(GamePlayer p) { other = p; } synchronized void giveTurn() { // called by other player myturn = true; notify(); // unblock thread } void releaseTurn() { GamePlayer p; synchronized(this) { myturn = false; p = other; } p.giveTurn(); // open call } synchronized void awaitTurn() throws InterruptedException { while (!myturn) wait(); } void move() { /*... perform one move ... */ } public void run() { try { for (;;) { awaitTurn(); move(); releaseTurn(); } } catch (InterruptedException ie) {} // die } public static void main(String[] args) { GamePlayer one = new GamePlayer(); GamePlayer two = new GamePlayer(); one.setOther(two); two.setOther(one); one.giveTurn(); new Thread(one).start(); new Thread(two).start(); } } //class TimeoutException extends InterruptedException { ... } class TimeOutBoundedCounter { static final long MIN = 0; // minimum allowed value static final long MAX = 10; // maximum allowed value protected long count = 0; protected long TIMEOUT = 5000; // for illustration // ... synchronized void inc() throws InterruptedException { if (count >= MAX) { long start = System.currentTimeMillis(); long waitTime = TIMEOUT; for (;;) { if (waitTime <= 0) throw new TimeoutException(TIMEOUT); else { try { wait(waitTime); } catch (InterruptedException ie) { throw ie; // coded this way just for emphasis } if (count < MAX) break; else { long now = System.currentTimeMillis(); waitTime = TIMEOUT - (now - start); } } } } ++count; notifyAll(); } synchronized void dec() throws InterruptedException { // ... similar ... } } class SpinLock { // Avoid needing to use this private volatile boolean busy = false; synchronized void release() { busy = false; } void acquire() throws InterruptedException { int itersBeforeYield = 100; // 100 is arbitrary int itersBeforeSleep = 200; // 200 is arbitrary long sleepTime = 1; // 1msec is arbitrary int iters = 0; for (;;) { if (!busy) { // test-and-test-and-set synchronized(this) { if (!busy) { busy = true; return; } } } if (iters < itersBeforeYield) { // spin phase ++iters; } else if (iters < itersBeforeSleep) { // yield phase ++iters; Thread.yield(); } else { // back-off phase Thread.sleep(sleepTime); sleepTime = 3 * sleepTime / 2 + 1; // 50% is arbitrary } } } } class BoundedBufferWithStateTracking { protected final Object[] array; // the elements protected int putPtr = 0; // circular indices protected int takePtr = 0; protected int usedSlots = 0; // the count public BoundedBufferWithStateTracking(int capacity) throws IllegalArgumentException { if (capacity <= 0) throw new IllegalArgumentException(); array = new Object[capacity]; } public synchronized int size() { return usedSlots; } public int capacity() { return array.length; } public synchronized void put(Object x) throws InterruptedException { while (usedSlots == array.length) // wait until not full wait(); array[putPtr] = x; putPtr = (putPtr + 1) % array.length; // cyclically inc if (usedSlots++ == 0) // signal if was empty notifyAll(); } public synchronized Object take() throws InterruptedException{ while (usedSlots == 0) // wait until not empty wait(); Object x = array[takePtr]; array[takePtr] = null; takePtr = (takePtr + 1) % array.length; if (usedSlots-- == array.length) // signal if was full notifyAll(); return x; } } class BoundedCounterWithStateVariable { static final long MIN = 0; // minimum allowed value static final long MAX = 10; // maximum allowed value static final int BOTTOM = 0, MIDDLE = 1, TOP = 2; protected int state = BOTTOM; // the 
state variable protected long count = MIN; protected void updateState() { // PRE: synch lock held int oldState = state; if (count == MIN) state = BOTTOM; else if (count == MAX) state = TOP; else state = MIDDLE; if (state != oldState && oldState != MIDDLE) notifyAll(); // notify on transition } public synchronized long count() { return count; } public synchronized void inc() throws InterruptedException { while (state == TOP) wait(); ++count; updateState(); } public synchronized void dec() throws InterruptedException { while (state == BOTTOM) wait(); --count; updateState(); } } class Inventory { protected final Hashtable items = new Hashtable(); protected final Hashtable suppliers = new Hashtable(); // execution state tracking variables: protected int storing = 0; // number of in-progress stores protected int retrieving = 0; // number of retrieves // ground actions: protected void doStore(String description, Object item, String supplier) { items.put(description, item); suppliers.put(supplier, description); } protected Object doRetrieve(String description) { Object x = items.get(description); if (x != null) items.remove(description); return x; } public void store(String description, Object item, String supplier) throws InterruptedException { synchronized(this) { // Before-action while (retrieving != 0) // don't overlap with retrieves wait(); ++storing; // record exec state } try { doStore(description, item, supplier); // Ground action } finally { // After-action synchronized(this) { // signal retrieves if (--storing == 0) // only necessary when hit zero notifyAll(); } } } public Object retrieve(String description) throws InterruptedException { synchronized(this) { // Before-action // wait until no stores or retrieves while (storing != 0 || retrieving != 0) wait(); ++retrieving; } try { return doRetrieve(description); // ground action } finally { synchronized(this) { // After-action if (--retrieving == 0) notifyAll(); } } } } abstract class ReadWrite { protected int activeReaders = 0; // threads executing read protected int activeWriters = 0; // always zero or one protected int waitingReaders = 0; // threads not yet in read protected int waitingWriters = 0; // same for write protected abstract void doRead(); // implement in subclasses protected abstract void doWrite(); public void read() throws InterruptedException { beforeRead(); try { doRead(); } finally { afterRead(); } } public void write() throws InterruptedException { beforeWrite(); try { doWrite(); } finally { afterWrite(); } } protected boolean allowReader() { return waitingWriters == 0 && activeWriters == 0; } protected boolean allowWriter() { return activeReaders == 0 && activeWriters == 0; } protected synchronized void beforeRead() throws InterruptedException { ++waitingReaders; while (!allowReader()) { try { wait(); } catch (InterruptedException ie) { --waitingReaders; // roll back state throw ie; } } --waitingReaders; ++activeReaders; } protected synchronized void afterRead() { --activeReaders; notifyAll(); } protected synchronized void beforeWrite() throws InterruptedException { ++waitingWriters; while (!allowWriter()) { try { wait(); } catch (InterruptedException ie) { --waitingWriters; throw ie; } } --waitingWriters; ++activeWriters; } protected synchronized void afterWrite() { --activeWriters; notifyAll(); } } class RWLock extends ReadWrite implements ReadWriteLock { // Incomplete class RLock implements Sync { public void acquire() throws InterruptedException { beforeRead(); } public void release() { afterRead(); } public 
boolean attempt(long msecs) throws InterruptedException{ return beforeRead(msecs); } } class WLock implements Sync { public void acquire() throws InterruptedException { beforeWrite(); } public void release() { afterWrite(); } public boolean attempt(long msecs) throws InterruptedException{ return beforeWrite(msecs); } } protected final RLock rlock = new RLock(); protected final WLock wlock = new WLock(); public Sync readLock() { return rlock; } public Sync writeLock() { return wlock; } public boolean beforeRead(long msecs) throws InterruptedException { return true; // ... time-out version of beforeRead ... } public boolean beforeWrite(long msecs) throws InterruptedException { return true; // ... time-out version of beforeWrite ... } protected void doRead() {} protected void doWrite() {} } class StackEmptyException extends Exception { } class Stack { // Fragments public synchronized boolean isEmpty() { return false; /* ... */ } public synchronized void push(Object x) { /* ... */ } public synchronized Object pop() throws StackEmptyException { if (isEmpty()) throw new StackEmptyException(); else return null; } } class WaitingStack extends Stack { public synchronized void push(Object x) { super.push(x); notifyAll(); } public synchronized Object waitingPop() throws InterruptedException { while (isEmpty()) { wait(); } try { return super.pop(); } catch (StackEmptyException cannothappen) { // only possible if pop contains a programming error throw new Error("Internal implementation error"); } } } class PartWithGuard { protected boolean cond = false; synchronized void await() throws InterruptedException { while (!cond) wait(); // any other code } synchronized void signal(boolean c) { cond = c; notifyAll(); } } class Host { protected final PartWithGuard part = new PartWithGuard(); synchronized void rely() throws InterruptedException { part.await(); } synchronized void set(boolean c) { part.signal(c); } } class OwnedPartWithGuard { // Code sketch protected boolean cond = false; final Object lock; OwnedPartWithGuard(Object owner) { lock = owner; } void await() throws InterruptedException { synchronized(lock) { while (!cond) lock.wait(); // ... } } void signal(boolean c) { synchronized(lock) { cond = c; lock.notifyAll(); } } } class Pool { // Incomplete protected java.util.ArrayList items = new ArrayList(); protected java.util.HashSet busy = new HashSet(); protected final Semaphore available; public Pool(int n) { available = new Semaphore(n); initializeItems(n); } public Object getItem() throws InterruptedException { available.acquire(); return doGet(); } public void returnItem(Object x) { if (doReturn(x)) available.release(); } protected synchronized Object doGet() { Object x = items.remove(items.size()-1); busy.add(x); // put in set to check returns return x; } protected synchronized boolean doReturn(Object x) { if (busy.remove(x)) { items.add(x); // put back into available item list return true; } else return false; } protected void initializeItems(int n) { // Somehow create the resource objects // and place them in items list. 
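    // A minimal placeholder, assuming plain Objects may stand in for real
    // resources (an actual pool would construct genuine resource objects here):
    for (int i = 0; i < n; ++i)
      items.add(new Object());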
} } class BufferArray { protected final Object[] array; // the elements protected int putPtr = 0; // circular indices protected int takePtr = 0; BufferArray(int n) { array = new Object[n]; } synchronized void insert(Object x) { // put mechanics array[putPtr] = x; putPtr = (putPtr + 1) % array.length; } synchronized Object extract() { // take mechanics Object x = array[takePtr]; array[takePtr] = null; takePtr = (takePtr + 1) % array.length; return x; } } class BoundedBufferWithSemaphores { protected final BufferArray buff; protected final Semaphore putPermits; protected final Semaphore takePermits; public BoundedBufferWithSemaphores(int capacity) throws IllegalArgumentException { if (capacity <= 0) throw new IllegalArgumentException(); buff = new BufferArray(capacity); putPermits = new Semaphore(capacity); takePermits = new Semaphore(0); } public void put(Object x) throws InterruptedException { putPermits.acquire(); buff.insert(x); takePermits.release(); } public Object take() throws InterruptedException { takePermits.acquire(); Object x = buff.extract(); putPermits.release(); return x; } public Object poll(long msecs) throws InterruptedException { if (!takePermits.attempt(msecs)) return null; Object x = buff.extract(); putPermits.release(); return x; } public boolean offer(Object x, long msecs) throws InterruptedException { if (!putPermits.attempt(msecs)) return false; buff.insert(x); takePermits.release(); return true; } } class SynchronousChannel /* implements Channel */ { protected Object item = null; // to hold while in transit protected final Semaphore putPermit; protected final Semaphore takePermit; protected final Semaphore taken; public SynchronousChannel() { putPermit = new Semaphore(1); takePermit = new Semaphore(0); taken = new Semaphore(0); } public void put(Object x) throws InterruptedException { putPermit.acquire(); item = x; takePermit.release(); // Must wait until signalled by taker InterruptedException caught = null; for (;;) { try { taken.acquire(); break; } catch(InterruptedException ie) { caught = ie; } } if (caught != null) throw caught; // can now rethrow } public Object take() throws InterruptedException { takePermit.acquire(); Object x = item; item = null; putPermit.release(); taken.release(); return x; } } class Player implements Runnable { // Code sketch // ... protected final Latch startSignal; Player(Latch l) { startSignal = l; } public void run() { try { startSignal.acquire(); play(); } catch(InterruptedException ie) { return; } } void play() {} // ... } class Game { // ... void begin(int nplayers) { Latch startSignal = new Latch(); for (int i = 0; i < nplayers; ++i) new Thread(new Player(startSignal)).start(); startSignal.release(); } } class LatchingThermometer { // Seldom useful private volatile boolean ready; // latching private volatile float temperature; public double getReading() { while (!ready) Thread.yield(); return temperature; } void sense(float t) { // called from sensor temperature = t; ready = true; } } class FillAndEmpty { // Incomplete static final int SIZE = 1024; // buffer size, for demo protected Rendezvous exchanger = new Rendezvous(2); protected byte readByte() { return 1; /* ... */; } protected void useByte(byte b) { /* ... 
*/ } public void start() { new Thread(new FillingLoop()).start(); new Thread(new EmptyingLoop()).start(); } class FillingLoop implements Runnable { // inner class public void run() { byte[] buffer = new byte[SIZE]; int position = 0; try { for (;;) { if (position == SIZE) { buffer = (byte[])(exchanger.rendezvous(buffer)); position = 0; } buffer[position++] = readByte(); } } catch (BrokenBarrierException ex) {} // die catch (InterruptedException ie) {} // die } } class EmptyingLoop implements Runnable { // inner class public void run() { byte[] buffer = new byte[SIZE]; int position = SIZE; // force exchange first time through try { for (;;) { if (position == SIZE) { buffer = (byte[])(exchanger.rendezvous(buffer)); position = 0; } useByte(buffer[position++]); } } catch (BrokenBarrierException ex) {} // die catch (InterruptedException ex) {} // die } } } class PThreadsStyleBuffer { private final Mutex mutex = new Mutex(); private final CondVar notFull = new CondVar(mutex); private final CondVar notEmpty = new CondVar(mutex); private int count = 0; private int takePtr = 0; private int putPtr = 0; private final Object[] array; public PThreadsStyleBuffer(int capacity) { array = new Object[capacity]; } public void put(Object x) throws InterruptedException { mutex.acquire(); try { while (count == array.length) notFull.await(); array[putPtr] = x; putPtr = (putPtr + 1) % array.length; ++count; notEmpty.signal(); } finally { mutex.release(); } } public Object take() throws InterruptedException { Object x = null; mutex.acquire(); try { while (count == 0) notEmpty.await(); x = array[takePtr]; array[takePtr] = null; takePtr = (takePtr + 1) % array.length; --count; notFull.signal(); } finally { mutex.release(); } return x; } } class BankAccount { protected long balance = 0; public synchronized long balance() { return balance; } public synchronized void deposit(long amount) throws InsufficientFunds { if (balance + amount < 0) throw new InsufficientFunds(); else balance += amount; } public void withdraw(long amount) throws InsufficientFunds { deposit(-amount); } } class TSBoolean { private boolean value = false; // set to true; return old value public synchronized boolean testAndSet() { boolean oldValue = value; value = true; return oldValue; } public synchronized void clear() { value = false; } } class ATCheckingAccount extends BankAccount { protected ATSavingsAccount savings; protected long threshold; protected TSBoolean transferInProgress = new TSBoolean(); public ATCheckingAccount(long t) { threshold = t; } // called only upon initialization synchronized void initSavings(ATSavingsAccount s) { savings = s; } protected boolean shouldTry() { return balance < threshold; } void tryTransfer() { // called internally or from savings if (!transferInProgress.testAndSet()) { // if not busy ... 
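      // The testAndSet above acts as a non-blocking try-lock: at most one thread
      // proceeds with the transfer while any other caller simply skips it, and
      // the finally clause below guarantees that the flag is cleared afterwards.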
try { synchronized(this) { if (shouldTry()) balance += savings.transferOut(); } } finally { transferInProgress.clear(); } } } public synchronized void deposit(long amount) throws InsufficientFunds { if (balance + amount < 0) throw new InsufficientFunds(); else { balance += amount; tryTransfer(); } } } class ATSavingsAccount extends BankAccount { protected ATCheckingAccount checking; protected long maxTransfer; public ATSavingsAccount(long max) { maxTransfer = max; } // called only upon initialization synchronized void initChecking(ATCheckingAccount c) { checking = c; } synchronized long transferOut() { // called only from checking long amount = balance; if (amount > maxTransfer) amount = maxTransfer; if (amount >= 0) balance -= amount; return amount; } public synchronized void deposit(long amount) throws InsufficientFunds { if (balance + amount < 0) throw new InsufficientFunds(); else { balance += amount; checking.tryTransfer(); } } } class Subject { protected double val = 0.0; // modeled state protected final EDU.oswego.cs.dl.util.concurrent.CopyOnWriteArrayList observers = new EDU.oswego.cs.dl.util.concurrent.CopyOnWriteArrayList(); public synchronized double getValue() { return val; } protected synchronized void setValue(double d) { val = d; } public void attach(Observer o) { observers.add(o); } public void detach(Observer o) { observers.remove(o); } public void changeValue(double newstate) { setValue(newstate); for (Iterator it = observers.iterator(); it.hasNext();) ((Observer)(it.next())).changed(this); } } class Observer { protected double cachedState; // last known state protected final Subject subj; // only one allowed here Observer(Subject s) { subj = s; cachedState = s.getValue(); display(); } synchronized void changed(Subject s){ if (s != subj) return; // only one subject double oldState = cachedState; cachedState = subj.getValue(); // probe if (oldState != cachedState) display(); } protected void display() { // somehow display subject state; for example just: System.out.println(cachedState); } } class Failure extends Exception {} interface Transactor { // Enter a new transaction and return true, if can do so public boolean join(Transaction t); // Return true if this transaction can be committed public boolean canCommit(Transaction t); // Update state to reflect current transaction public void commit(Transaction t) throws Failure; // Roll back state (No exception; ignore if inapplicable) public void abort(Transaction t); } class Transaction { // add anything you want here } interface TransBankAccount extends Transactor { public long balance(Transaction t) throws Failure; public void deposit(Transaction t, long amount) throws InsufficientFunds, Failure; public void withdraw(Transaction t, long amount) throws InsufficientFunds, Failure; } class SimpleTransBankAccount implements TransBankAccount { protected long balance = 0; protected long workingBalance = 0; // single shadow copy protected Transaction currentTx = null; // single transaction public synchronized long balance(Transaction t) throws Failure { if (t != currentTx) throw new Failure(); return workingBalance; } public synchronized void deposit(Transaction t, long amount) throws InsufficientFunds, Failure { if (t != currentTx) throw new Failure(); if (workingBalance < -amount) throw new InsufficientFunds(); workingBalance += amount; } public synchronized void withdraw(Transaction t, long amount) throws InsufficientFunds, Failure { deposit(t, -amount); } public synchronized boolean join(Transaction t) { if (currentTx != 
null) return false; currentTx = t; workingBalance = balance; return true; } public synchronized boolean canCommit(Transaction t) { return (t == currentTx); } public synchronized void abort(Transaction t) { if (t == currentTx) currentTx = null; } public synchronized void commit(Transaction t) throws Failure{ if (t != currentTx) throw new Failure(); balance = workingBalance; currentTx = null; } } class ProxyAccount /* implements TransBankAccount */ { private TransBankAccount delegate; public boolean join(Transaction t) { return delegate.join(t); } public long balance(Transaction t) throws Failure { return delegate.balance(t); } // and so on... } class FailedTransferException extends Exception {} class RetryableTransferException extends Exception {} class TransactionLogger { void cancelLogEntry(Transaction t, long amount, TransBankAccount src, TransBankAccount dst) {} void logTransfer(Transaction t, long amount, TransBankAccount src, TransBankAccount dst) {} void logCompletedTransfer(Transaction t, long amount, TransBankAccount src, TransBankAccount dst) {} } class AccountUser { TransactionLogger log; // a made-up class // helper method called on any failure void rollback(Transaction t, long amount, TransBankAccount src, TransBankAccount dst) { log.cancelLogEntry(t, amount, src, dst); src.abort(t); dst.abort(t); } public boolean transfer(long amount, TransBankAccount src, TransBankAccount dst) throws FailedTransferException, RetryableTransferException { if (src == null || dst == null) // screen arguments throw new IllegalArgumentException(); if (src == dst) return true; // avoid aliasing Transaction t = new Transaction(); log.logTransfer(t, amount, src, dst); // record if (!src.join(t) || !dst.join(t)) { // cannot join rollback(t, amount, src, dst); throw new RetryableTransferException(); } try { src.withdraw(t, amount); dst.deposit(t, amount); } catch (InsufficientFunds ex) { // semantic failure rollback(t, amount, src, dst); return false; } catch (Failure k) { // transaction error rollback(t, amount, src, dst); throw new RetryableTransferException(); } if (!src.canCommit(t) || !dst.canCommit(t)) { // interference rollback(t, amount, src, dst); throw new RetryableTransferException(); } try { src.commit(t); dst.commit(t); log.logCompletedTransfer(t, amount, src, dst); return true; } catch(Failure k) { // commitment failure rollback(t, amount, src, dst); throw new FailedTransferException(); } } } class ColoredThing { protected Color myColor = Color.red; // the sample property protected boolean changePending; // vetoable listeners: protected final VetoableChangeMulticaster vetoers = new VetoableChangeMulticaster(this); // also some ordinary listeners: protected final PropertyChangeMulticaster listeners = new PropertyChangeMulticaster(this); // registration methods, including: void addVetoer(VetoableChangeListener l) { vetoers.addVetoableChangeListener(l); } public synchronized Color getColor() { // property accessor return myColor; } // internal helper methods protected synchronized void commitColor(Color newColor) { myColor = newColor; changePending = false; } protected synchronized void abortSetColor() { changePending = false; } public void setColor(Color newColor) throws PropertyVetoException { Color oldColor = null; boolean completed = false; synchronized (this) { if (changePending) { // allow only one transaction at a time throw new PropertyVetoException( "Concurrent modification", null); } else if (newColor == null) { // Argument screening throw new PropertyVetoException( "Cannot change 
color to Null", null); } else { changePending = true; oldColor = myColor; } } try { vetoers.fireVetoableChange("color", oldColor, newColor); // fall through if no exception: commitColor(newColor); completed = true; // notify other listeners that change is committed listeners.firePropertyChange("color", oldColor, newColor); } catch(PropertyVetoException ex) { // abort on veto abortSetColor(); completed = true; throw ex; } finally { // trap any unchecked exception if (!completed) abortSetColor(); } } } class Semaphore implements Sync { protected long permits; // current number of available permits public Semaphore(long initialPermits) { permits = initialPermits; } public synchronized void release() { ++permits; notify(); } public void acquire() throws InterruptedException { if (Thread.interrupted()) throw new InterruptedException(); synchronized(this) { try { while (permits <= 0) wait(); --permits; } catch (InterruptedException ie) { notify(); throw ie; } } } public boolean attempt(long msecs)throws InterruptedException{ if (Thread.interrupted()) throw new InterruptedException(); synchronized(this) { if (permits > 0) { // Same as acquire but messier --permits; return true; } else if (msecs <= 0) // avoid timed wait if not needed return false; else { try { long startTime = System.currentTimeMillis(); long waitTime = msecs; for (;;) { wait(waitTime); if (permits > 0) { --permits; return true; } else { // Check for time-out long now = System.currentTimeMillis(); waitTime = msecs - (now - startTime); if (waitTime <= 0) return false; } } } catch(InterruptedException ie) { notify(); throw ie; } } } } } final class BoundedBufferWithDelegates { private Object[] array; private Exchanger putter; private Exchanger taker; public BoundedBufferWithDelegates(int capacity) throws IllegalArgumentException { if (capacity <= 0) throw new IllegalArgumentException(); array = new Object[capacity]; putter = new Exchanger(capacity); taker = new Exchanger(0); } public void put(Object x) throws InterruptedException { putter.exchange(x); } public Object take() throws InterruptedException { return taker.exchange(null); } void removedSlotNotification(Exchanger h) { // relay if (h == putter) taker.addedSlotNotification(); else putter.addedSlotNotification(); } protected class Exchanger { // Inner class protected int ptr = 0; // circular index protected int slots; // number of usable slots protected int waiting = 0; // number of waiting threads Exchanger(int n) { slots = n; } synchronized void addedSlotNotification() { ++slots; if (waiting > 0) // unblock a single waiting thread notify(); } Object exchange(Object x) throws InterruptedException { Object old = null; // return value synchronized(this) { while (slots <= 0) { // wait for slot ++waiting; try { wait(); } catch(InterruptedException ie) { notify(); throw ie; } finally { --waiting; } } --slots; // use slot old = array[ptr]; array[ptr] = x; ptr = (ptr + 1) % array.length; // advance position } removedSlotNotification(this); // notify of change return old; } } } final class BoundedBufferWithMonitorObjects { private final Object[] array; // the elements private int putPtr = 0; // circular indices private int takePtr = 0; private int emptySlots; // slot counts private int usedSlots = 0; private int waitingPuts = 0; // counts of waiting threads private int waitingTakes = 0; private final Object putMonitor = new Object(); private final Object takeMonitor = new Object(); public BoundedBufferWithMonitorObjects(int capacity) throws IllegalArgumentException { if (capacity <= 
0) throw new IllegalArgumentException(); array = new Object[capacity]; emptySlots = capacity; } public void put(Object x) throws InterruptedException { synchronized(putMonitor) { while (emptySlots <= 0) { ++waitingPuts; try { putMonitor.wait(); } catch(InterruptedException ie) { putMonitor.notify(); throw ie; } finally { --waitingPuts; } } --emptySlots; array[putPtr] = x; putPtr = (putPtr + 1) % array.length; } synchronized(takeMonitor) { // directly notify ++usedSlots; if (waitingTakes > 0) takeMonitor.notify(); } } public Object take() throws InterruptedException { Object old = null; synchronized(takeMonitor) { while (usedSlots <= 0) { ++waitingTakes; try { takeMonitor.wait(); } catch(InterruptedException ie) { takeMonitor.notify(); throw ie; } finally { --waitingTakes; } } --usedSlots; old = array[takePtr]; array[takePtr] = null; takePtr = (takePtr + 1) % array.length; } synchronized(putMonitor) { ++emptySlots; if (waitingPuts > 0) putMonitor.notify(); } return old; } } class FIFOSemaphore extends Semaphore { protected final WaitQueue queue = new WaitQueue(); public FIFOSemaphore(long initialPermits) { super(initialPermits); } public void acquire() throws InterruptedException { if (Thread.interrupted()) throw new InterruptedException(); WaitNode node = null; synchronized(this) { if (permits > 0) { // no need to queue --permits; return; } else { node = new WaitNode(); queue.enq(node); } } // must release lock before node wait node.doWait(); } public synchronized void release() { for (;;) { // retry until success WaitNode node = queue.deq(); if (node == null) { // queue is empty ++permits; return; } else if (node.doNotify()) return; // else node was already released due to // interruption or time-out, so must retry } } // Queue node class. Each node serves as a monitor. protected static class WaitNode { boolean released = false; WaitNode next = null; synchronized void doWait() throws InterruptedException { try { while (!released) wait(); } catch (InterruptedException ie) { if (!released) { // Interrupted before notified // Suppress future notifications: released = true; throw ie; } else { // Interrupted after notified // Ignore exception but propagate status: Thread.currentThread().interrupt(); } } } synchronized boolean doNotify() { // return true if notified if (released) // was interrupted or timed out return false; else { released = true; notify(); return true; } } synchronized boolean doTimedWait(long msecs) throws InterruptedException { return true; // similar } } // Standard linked queue class. // Used only when holding Semaphore lock. 
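  // enq and deq below are deliberately unsynchronized: FIFOSemaphore invokes
  // them only while holding its own lock, so the queue relies on that
  // caller-side exclusion rather than on a second layer of locking.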
protected static class WaitQueue { protected WaitNode head = null; protected WaitNode last = null; protected void enq(WaitNode node) { if (last == null) head = last = node; else { last.next = node; last = node; } } protected WaitNode deq() { WaitNode node = head; if (node != null) { head = node.next; if (head == null) last = null; node.next = null; } return node; } } } class WebService implements Runnable { static final int PORT = 1040; // just for demo Handler handler = new Handler(); public void run() { try { ServerSocket socket = new ServerSocket(PORT); for (;;) { final Socket connection = socket.accept(); new Thread(new Runnable() { public void run() { handler.process(connection); }}).start(); } } catch(Exception e) { } // die } public static void main(String[] args) { new Thread(new WebService()).start(); } } class Handler { void process(Socket s) { DataInputStream in = null; DataOutputStream out = null; try { in = new DataInputStream(s.getInputStream()); out = new DataOutputStream(s.getOutputStream()); int request = in.readInt(); int result = -request; // return negation to client out.writeInt(result); } catch(IOException ex) {} // fall through finally { // clean up try { if (in != null) in.close(); } catch (IOException ignore) {} try { if (out != null) out.close(); } catch (IOException ignore) {} try { s.close(); } catch (IOException ignore) {} } } } class OpenCallHost { // Generic code sketch protected long localState; protected final Helper helper = new Helper(); protected synchronized void updateState() { localState = 2; // ...; } public void req() { updateState(); helper.handle(); } } class ThreadPerMessageHost { // Generic code sketch protected long localState; protected final Helper helper = new Helper(); protected synchronized void updateState() { localState = 2; // ...; } public void req() { updateState(); new Thread(new Runnable() { public void run() { helper.handle(); } }).start(); } } interface Executor { void execute(Runnable r); } class HostWithExecutor { // Generic code sketch protected long localState; protected final Helper helper = new Helper(); protected final Executor executor; public HostWithExecutor(Executor e) { executor = e; } protected synchronized void updateState() { localState = 2; // ...; } public void req() { updateState(); executor.execute(new Runnable() { public void run() { helper.handle(); } }); } } class PlainWorkerPool implements Executor { protected final Channel workQueue; public void execute(Runnable r) { try { workQueue.put(r); } catch (InterruptedException ie) { // postpone response Thread.currentThread().interrupt(); } } public PlainWorkerPool(Channel ch, int nworkers) { workQueue = ch; for (int i = 0; i < nworkers; ++i) activate(); } protected void activate() { Runnable runLoop = new Runnable() { public void run() { try { for (;;) { Runnable r = (Runnable)(workQueue.take()); r.run(); } } catch (InterruptedException ie) {} // die } }; new Thread(runLoop).start(); } } class TimerDaemon { // Fragments static class TimerTask implements Comparable { final Runnable command; final long execTime; // time to run at public int compareTo(Object x) { long otherExecTime = ((TimerTask)(x)).execTime; return (execTime < otherExecTime) ? -1 : (execTime == otherExecTime)? 
0 : 1; } TimerTask(Runnable r, long t) { command = r; execTime = t; } } // a heap or list with methods that preserve // ordering with respect to TimerTask.compareTo static class PriorityQueue { void put(TimerTask t) {} TimerTask least() { return null; } void removeLeast() {} boolean isEmpty() { return true; } } protected final PriorityQueue pq = new PriorityQueue(); public synchronized void executeAfterDelay(Runnable r,long t){ pq.put(new TimerTask(r, t + System.currentTimeMillis())); notifyAll(); } public synchronized void executeAt(Runnable r, Date time) { pq.put(new TimerTask(r, time.getTime())); notifyAll(); } // wait for and then return next task to run protected synchronized Runnable take() throws InterruptedException { for (;;) { while (pq.isEmpty()) wait(); TimerTask t = pq.least(); long now = System.currentTimeMillis(); long waitTime = now - t.execTime; if (waitTime <= 0) { pq.removeLeast(); return t.command; } else wait(waitTime); } } public TimerDaemon() { activate(); } // only one void activate() { // same as PlainWorkerThread except using above take method } } class SessionTask implements Runnable { // generic code sketch static final int BUFFSIZE = 1024; protected final Socket socket; protected final InputStream input; SessionTask(Socket s) throws IOException { socket = s; input = socket.getInputStream(); } void processCommand(byte[] b, int n) {} void cleanup() {} public void run() { // Normally run in a new thread byte[] commandBuffer = new byte[BUFFSIZE]; try { for (;;) { int bytes = input.read(commandBuffer, 0, BUFFSIZE); if (bytes != BUFFSIZE) break; processCommand(commandBuffer, bytes); } } catch (IOException ex) { cleanup(); } finally { try { input.close(); socket.close(); } catch(IOException ignore) {} } } } class IOEventTask implements Runnable { // generic code sketch static final int BUFFSIZE = 1024; protected final Socket socket; protected final InputStream input; protected volatile boolean done = false; // latches true IOEventTask(Socket s) throws IOException { socket = s; input = socket.getInputStream(); } void processCommand(byte[] b, int n) {} void cleanup() {} public void run() { // trigger only when input available if (done) return; byte[] commandBuffer = new byte[BUFFSIZE]; try { int bytes = input.read(commandBuffer, 0, BUFFSIZE); if (bytes != BUFFSIZE) done = true; else processCommand(commandBuffer, bytes); } catch (IOException ex) { cleanup(); done = true; } finally { if (!done) return; try { input.close(); socket.close(); } catch(IOException ignore) {} } } // Accessor methods needed by triggering agent: boolean done() { return done; } InputStream input() { return input; } } class PollingWorker implements Runnable { // Incomplete private java.util.List tasks = new LinkedList(); // ...; private long sleepTime = 100; // ...; void register(IOEventTask t) { tasks.add(t); } void deregister(IOEventTask t) { tasks.remove(t); } public void run() { try { for (;;) { for (Iterator it = tasks.iterator(); it.hasNext();) { IOEventTask t = (IOEventTask)(it.next()); if (t.done()) deregister(t); else { boolean trigger; try { trigger = t.input().available() > 0; } catch (IOException ex) { trigger = true; // trigger if exception on check } if (trigger) t.run(); } } Thread.sleep(sleepTime); } } catch (InterruptedException ie) {} } } abstract class Box { protected Color color = Color.white; public synchronized Color getColor() { return color; } public synchronized void setColor(Color c) { color = c; } public abstract java.awt.Dimension size(); public abstract Box duplicate(); 
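  // Locking note: color is guarded here by the Box lock via the synchronized
  // accessors above, while each concrete subclass guards its own geometry;
  // duplicate() implementations copy state through these accessors.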
// clone public abstract void show(Graphics g, Point origin);// display } class BasicBox extends Box { protected Dimension size; public BasicBox(int xdim, int ydim) { size = new Dimension(xdim, ydim); } public synchronized Dimension size() { return size; } public void show(Graphics g, Point origin) { g.setColor(getColor()); g.fillRect(origin.x, origin.y, size.width, size.height); } public synchronized Box duplicate() { Box p = new BasicBox(size.width, size.height); p.setColor(getColor()); return p; } } abstract class JoinedPair extends Box { protected Box fst; // one of the boxes protected Box snd; // the other one protected JoinedPair(Box a, Box b) { fst = a; snd = b; } public synchronized void flip() { // swap fst/snd Box tmp = fst; fst = snd; snd = tmp; } public void show(Graphics g, Point p) {} public Dimension size() { return new Dimension(0,0); } public Box duplicate() { return null; } // other internal helper methods } class HorizontallyJoinedPair extends JoinedPair { public HorizontallyJoinedPair(Box l, Box r) { super(l, r); } public synchronized Box duplicate() { HorizontallyJoinedPair p = new HorizontallyJoinedPair(fst.duplicate(), snd.duplicate()); p.setColor(getColor()); return p; } // ... other implementations of abstract Box methods } class VerticallyJoinedPair extends JoinedPair { public VerticallyJoinedPair(Box l, Box r) { super(l, r); } // similar } class WrappedBox extends Box { protected Dimension wrapperSize; protected Box inner; public WrappedBox(Box innerBox, Dimension size) { inner = innerBox; wrapperSize = size; } public void show(Graphics g, Point p) {} public Dimension size() { return new Dimension(0,0); } public Box duplicate() { return null; } // ... other implementations of abstract Box methods } interface PushSource { void start(); } interface PushStage { void putA(Box p); } interface DualInputPushStage extends PushStage { void putB(Box p); } class DualInputAdapter implements PushStage { protected final DualInputPushStage stage; public DualInputAdapter(DualInputPushStage s) { stage = s; } public void putA(Box p) { stage.putB(p); } } class DevNull implements PushStage { public void putA(Box p) { } } class SingleOutputPushStage { private PushStage next1 = null; protected synchronized PushStage next1() { return next1; } public synchronized void attach1(PushStage s) { next1 = s; } } class DualOutputPushStage extends SingleOutputPushStage { private PushStage next2 = null; protected synchronized PushStage next2() { return next2; } public synchronized void attach2(PushStage s) { next2 = s; } }class Painter extends SingleOutputPushStage implements PushStage { protected final Color color; // the color to paint things public Painter(Color c) { color = c; } public void putA(Box p) { p.setColor(color); next1().putA(p); } } class Wrapper extends SingleOutputPushStage implements PushStage { protected final int thickness; public Wrapper(int t) { thickness = t; } public void putA(Box p) { Dimension d = new Dimension(thickness, thickness); next1().putA(new WrappedBox(p, d)); } } class Flipper extends SingleOutputPushStage implements PushStage { public void putA(Box p) { if (p instanceof JoinedPair) ((JoinedPair) p).flip(); next1().putA(p); } } abstract class Joiner extends SingleOutputPushStage implements DualInputPushStage { protected Box a = null; // incoming from putA protected Box b = null; // incoming from putB protected abstract Box join(Box p, Box q); protected synchronized Box joinFromA(Box p) { while (a != null) // wait until last consumed try { wait(); } catch 
(InterruptedException e) { return null; } a = p; return tryJoin(); } protected synchronized Box joinFromB(Box p) { // symmetrical while (b != null) try { wait(); } catch (InterruptedException ie) { return null; } b = p; return tryJoin(); } protected synchronized Box tryJoin() { if (a == null || b == null) return null; // cannot join Box joined = join(a, b); // make combined box a = b = null; // forget old boxes notifyAll(); // allow new puts return joined; } public void putA(Box p) { Box j = joinFromA(p); if (j != null) next1().putA(j); } public void putB(Box p) { Box j = joinFromB(p); if (j != null) next1().putA(j); } } class HorizontalJoiner extends Joiner { protected Box join(Box p, Box q) { return new HorizontallyJoinedPair(p, q); } } class VerticalJoiner extends Joiner { protected Box join(Box p, Box q) { return new VerticallyJoinedPair(p, q); } } class Collector extends SingleOutputPushStage implements DualInputPushStage { public void putA(Box p) { next1().putA(p);} public void putB(Box p) { next1().putA(p); } } class Alternator extends DualOutputPushStage implements PushStage { protected boolean outTo2 = false; // control alternation protected synchronized boolean testAndInvert() { boolean b = outTo2; outTo2 = !outTo2; return b; } public void putA(final Box p) { if (testAndInvert()) next1().putA(p); else { new Thread(new Runnable() { public void run() { next2().putA(p); } }).start(); } } } class Cloner extends DualOutputPushStage implements PushStage { public void putA(Box p) { final Box p2 = p.duplicate(); next1().putA(p); new Thread(new Runnable() { public void run() { next2().putA(p2); } }).start(); } } interface BoxPredicate { boolean test(Box p); } class MaxSizePredicate implements BoxPredicate { protected final int max; // max size to let through public MaxSizePredicate(int maximum) { max = maximum; } public boolean test(Box p) { return p.size().height <= max && p.size().width <= max; } } class Screener extends DualOutputPushStage implements PushStage { protected final BoxPredicate predicate; public Screener(BoxPredicate p) { predicate = p; } public void putA(final Box p) { if (predicate.test(p)) { new Thread(new Runnable() { public void run() { next1().putA(p); } }).start(); } else next2().putA(p); } } class BasicBoxSource extends SingleOutputPushStage implements PushSource, Runnable { protected final Dimension size; // maximum sizes protected final int productionTime; // simulated delay public BasicBoxSource(Dimension s, int delay) { size = s; productionTime = delay; } protected Box produce() { return new BasicBox((int)(Math.random() * size.width) + 1, (int)(Math.random() * size.height) + 1); } public void start() { next1().putA(produce()); } public void run() { try { for (;;) { start(); Thread.sleep((int)(Math.random() * 2* productionTime)); } } catch (InterruptedException ie) { } // die } } interface FileReader { void read(String filename, FileReaderClient client); } interface FileReaderClient { void readCompleted(String filename, byte[] data); void readFailed(String filename, IOException ex); } class FileReaderApp implements FileReaderClient { // Fragments protected FileReader reader = new AFileReader(); public void readCompleted(String filename, byte[] data) { // ... use data ... } public void readFailed(String filename, IOException ex){ // ... deal with failure ... 
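    // Both callbacks arrive on the thread spawned inside AFileReader.read, not
    // on the thread that called actionRequiringFile, so any state they update
    // must be protected; FileApplication below synchronizes for exactly that
    // reason.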
} public void actionRequiringFile() { reader.read("AppFile", this); } public void actionNotRequiringFile() { } } class AFileReader implements FileReader { public void read(final String fn, final FileReaderClient c) { new Thread(new Runnable() { public void run() { doRead(fn, c); } }).start(); } protected void doRead(String fn, FileReaderClient client) { byte[] buffer = new byte[1024]; // just for illustration try { FileInputStream s = new FileInputStream(fn); s.read(buffer); if (client != null) client.readCompleted(fn, buffer); } catch (IOException ex) { if (client != null) client.readFailed(fn, ex); } } } class FileApplication implements FileReaderClient { private String[] filenames; private int currentCompletion; // index of ready file public synchronized void readCompleted(String fn, byte[] d) { // wait until ready to process this callback while (!fn.equals(filenames[currentCompletion])) { try { wait(); } catch(InterruptedException ex) { return; } } // ... process data... // wake up any other thread waiting on this condition: ++currentCompletion; notifyAll(); } public synchronized void readFailed(String fn, IOException e){ // similar... } public synchronized void readfiles() { AFileReader reader = new AFileReader(); currentCompletion = 0; for (int i = 0; i < filenames.length; ++i) reader.read(filenames[i],this); } } interface Pic { byte[] getImage(); } interface Renderer { Pic render(URL src); } class StandardRenderer implements Renderer { public Pic render(URL src) { return null ; } } class PictureApp { // Code sketch // ... private final Renderer renderer = new StandardRenderer(); void displayBorders() {} void displayCaption() {} void displayImage(byte[] b) {} void cleanup() {} public void show(final URL imageSource) { class Waiter implements Runnable { private Pic result = null; Pic getResult() { return result; } public void run() { result = renderer.render(imageSource); } }; Waiter waiter = new Waiter(); Thread t = new Thread(waiter); t.start(); displayBorders(); // do other things displayCaption(); // while rendering try { t.join(); } catch(InterruptedException e) { cleanup(); return; } Pic pic = waiter.getResult(); if (pic != null) displayImage(pic.getImage()); else {} // ... deal with assumed rendering failure } } class AsynchRenderer implements Renderer { private final Renderer renderer = new StandardRenderer(); static class FuturePic implements Pic { // inner class private Pic pic = null; private boolean ready = false; synchronized void setPic(Pic p) { pic = p; ready = true; notifyAll(); } public synchronized byte[] getImage() { while (!ready) try { wait(); } catch (InterruptedException e) { return null; } return pic.getImage(); } } public Pic render(final URL src) { final FuturePic p = new FuturePic(); new Thread(new Runnable() { public void run() { p.setPic(renderer.render(src)); } }).start(); return p; } } class PicturAppWithFuture { // Code sketch private final Renderer renderer = new AsynchRenderer(); void displayBorders() {} void displayCaption() {} void displayImage(byte[] b) {} void cleanup() {} public void show(final URL imageSource) { Pic pic = renderer.render(imageSource); displayBorders(); // do other things ... 
displayCaption(); byte[] im = pic.getImage(); if (im != null) displayImage(im); else {} // deal with assumed rendering failure } } class FutureResult { // Fragments protected Object value = null; protected boolean ready = false; protected InvocationTargetException exception = null; public synchronized Object get() throws InterruptedException, InvocationTargetException { while (!ready) wait(); if (exception != null) throw exception; else return value; } public Runnable setter(final Callable function) { return new Runnable() { public void run() { try { set(function.call()); } catch(Throwable e) { setException(e); } } }; } synchronized void set(Object result) { value = result; ready = true; notifyAll(); } synchronized void setException(Throwable e) { exception = new InvocationTargetException(e); ready = true; notifyAll(); } // ... other auxiliary and convenience methods ... } class PictureDisplayWithFutureResult { // Code sketch void displayBorders() {} void displayCaption() {} void displayImage(byte[] b) {} void cleanup() {} private final Renderer renderer = new StandardRenderer(); // ... public void show(final URL imageSource) { try { FutureResult futurePic = new FutureResult(); Runnable command = futurePic.setter(new Callable() { public Object call() { return renderer.render(imageSource); } }); new Thread(command).start(); displayBorders(); displayCaption(); displayImage(((Pic)(futurePic.get())).getImage()); } catch (InterruptedException ex) { cleanup(); return; } catch (InvocationTargetException ex) { cleanup(); return; } } } interface Disk { void read(int cylinderNumber, byte[] buffer) throws Failure; void write(int cylinderNumber, byte[] buffer) throws Failure; } abstract class DiskTask implements Runnable { protected final int cylinder; // read/write parameters protected final byte[] buffer; protected Failure exception = null; // to relay out protected DiskTask next = null; // for use in queue protected final Latch done = new Latch(); // status indicator DiskTask(int c, byte[] b) { cylinder = c; buffer = b; } abstract void access() throws Failure; // read or write public void run() { try { access(); } catch (Failure ex) { setException(ex); } finally { done.release(); } } void awaitCompletion() throws InterruptedException { done.acquire(); } synchronized Failure getException() { return exception; } synchronized void setException(Failure f) { exception = f; } } class DiskReadTask extends DiskTask { DiskReadTask(int c, byte[] b) { super(c, b); } void access() throws Failure { /* ... raw read ... */ } } class DiskWriteTask extends DiskTask { DiskWriteTask(int c, byte[] b) { super(c, b); } void access() throws Failure { /* ... raw write ... 
*/ } } class ScheduledDisk implements Disk { protected final DiskTaskQueue tasks = new DiskTaskQueue(); public void read(int c, byte[] b) throws Failure { readOrWrite(new DiskReadTask(c, b)); } public void write(int c, byte[] b) throws Failure { readOrWrite(new DiskWriteTask(c, b)); } protected void readOrWrite(DiskTask t) throws Failure { tasks.put(t); try { t.awaitCompletion(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); // propagate throw new Failure(); // convert to failure exception } Failure f = t.getException(); if (f != null) throw f; } public ScheduledDisk() { // construct worker thread new Thread(new Runnable() { public void run() { try { for (;;) { tasks.take().run(); } } catch (InterruptedException ex) {} // die } }).start(); } } class DiskTaskQueue { protected DiskTask thisSweep = null; protected DiskTask nextSweep = null; protected int currentCylinder = 0; protected final Semaphore available = new Semaphore(0); void put(DiskTask t) { insert(t); available.release(); } DiskTask take() throws InterruptedException { available.acquire(); return extract(); } synchronized void insert(DiskTask t) { DiskTask q; if (t.cylinder >= currentCylinder) { // determine queue q = thisSweep; if (q == null) { thisSweep = t; return; } } else { q = nextSweep; if (q == null) { nextSweep = t; return; } } DiskTask trail = q; // ordered linked list insert q = trail.next; for (;;) { if (q == null || t.cylinder < q.cylinder) { trail.next = t; t.next = q; return; } else { trail = q; q = q.next; } } } synchronized DiskTask extract() { // PRE: not empty if (thisSweep == null) { // possibly swap queues thisSweep = nextSweep; nextSweep = null; } DiskTask t = thisSweep; thisSweep = t.next; currentCylinder = t.cylinder; return t; } } class Fib extends FJTask { static final int sequentialThreshold = 13; // for tuning volatile int number; // argument/result Fib(int n) { number = n; } int seqFib(int n) { if (n <= 1) return n; else return seqFib(n-1) + seqFib(n-2);<|fim▁hole|> int getAnswer() { if (!isDone()) throw new IllegalStateException("Not yet computed"); return number; } public void run() { int n = number; if (n <= sequentialThreshold) // base case number = seqFib(n); else { Fib f1 = new Fib(n - 1); // create subtasks Fib f2 = new Fib(n - 2); coInvoke(f1, f2); // fork then join both number = f1.number + f2.number; // combine results } } public static void main(String[] args) { // sample driver try { int groupSize = 2; // 2 worker threads int num = 35; // compute fib(35) FJTaskRunnerGroup group = new FJTaskRunnerGroup(groupSize); Fib f = new Fib(num); group.invoke(f); int result = f.getAnswer(); System.out.println("Answer: " + result); } catch (InterruptedException ex) {} // die } } class FibVL extends FJTask { static final int sequentialThreshold = 13; // for tuning volatile int number; // as before final FibVL next; // embedded linked list of sibling tasks FibVL(int n, FibVL list) { number = n; next = list; } int seqFib(int n) { if (n <= 1) return n; else return seqFib(n-1) + seqFib(n-2); } public void run() { int n = number; if (n <= sequentialThreshold) number = seqFib(n); else { FibVL forked = null; // list of subtasks forked = new FibVL(n - 1, forked); // prepends to list forked.fork(); forked = new FibVL(n - 2, forked); forked.fork(); number = accumulate(forked); } } // Traverse list, joining each subtask and adding to result int accumulate(FibVL list) { int r = 0; for (FibVL f = list; f != null; f = f.next) { f.join(); r += f.number; } return r; } } class FibVCB extends 
FJTask { static final int sequentialThreshold = 13; // for tuning // ... volatile int number = 0; // as before final FibVCB parent; // Is null for outermost call int callbacksExpected = 0; volatile int callbacksReceived = 0; FibVCB(int n, FibVCB p) { number = n; parent = p; } int seqFib(int n) { if (n <= 1) return n; else return seqFib(n-1) + seqFib(n-2); } // Callback method invoked by subtasks upon completion synchronized void addToResult(int n) { number += n; ++callbacksReceived; } public void run() { // same structure as join-based version int n = number; if (n <= sequentialThreshold) number = seqFib(n); else { // clear number so subtasks can fill in number = 0; // establish number of callbacks expected callbacksExpected = 2; new FibVCB(n - 1, this).fork(); new FibVCB(n - 2, this).fork(); // Wait for callbacks from children while (callbacksReceived < callbacksExpected) yield(); } // Call back parent if (parent != null) parent.addToResult(number); } } class NQueens extends FJTask { static int boardSize; // fixed after initialization in main // Boards are arrays where each cell represents a row, // and holds the column number of the queen in that row static class Result { // holder for ultimate result private int[] board = null; // non-null when solved synchronized boolean solved() { return board != null; } synchronized void set(int[] b) { // Support use by non-Tasks if (board == null) { board = b; notifyAll(); } } synchronized int[] await() throws InterruptedException { while (board == null) wait(); return board; } } static final Result result = new Result(); public static void main(String[] args) { try { boardSize = 8; // ...; FJTaskRunnerGroup tasks = new FJTaskRunnerGroup(4); int[] initialBoard = new int[0]; // start with empty board tasks.execute(new NQueens(initialBoard)); int[] board = result.await(); } catch (InterruptedException ie) {} // ... } final int[] sofar; // initial configuration NQueens(int[] board) { this.sofar = board; } public void run() { if (!result.solved()) { // skip if already solved int row = sofar.length; if (row >= boardSize) // done result.set(sofar); else { // try all expansions for (int q = 0; q < boardSize; ++q) { // Check if queen can be placed in column q of next row boolean attacked = false; for (int i = 0; i < row; ++i) { int p = sofar[i]; if (q == p || q == p - (row-i) || q == p + (row-i)) { attacked = true; break; } } // If so, fork to explore moves from new configuration if (!attacked) { // build extended board representation int[] next = new int[row+1]; for (int k = 0; k < row; ++k) next[k] = sofar[k]; next[row] = q; new NQueens(next).fork(); } } } } } } abstract class JTree extends FJTask { volatile double maxDiff; // for convergence check } class Interior extends JTree { private final JTree[] quads; Interior(JTree q1, JTree q2, JTree q3, JTree q4) { quads = new JTree[] { q1, q2, q3, q4 }; } public void run() { coInvoke(quads); double md = 0.0; for (int i = 0; i < quads.length; ++i) { md = Math.max(md,quads[i].maxDiff); quads[i].reset(); } maxDiff = md; } } class Leaf extends JTree { private final double[][] A; private final double[][] B; private final int loRow; private final int hiRow; private final int loCol; private final int hiCol; private int steps = 0; Leaf(double[][] A, double[][] B, int loRow, int hiRow, int loCol, int hiCol) { this.A = A; this.B = B; this.loRow = loRow; this.hiRow = hiRow; this.loCol = loCol; this.hiCol = hiCol; } public synchronized void run() { boolean AtoB = (steps++ % 2) == 0; double[][] a = (AtoB)? 
A : B; double[][] b = (AtoB)? B : A; double md = 0.0; for (int i = loRow; i <= hiRow; ++i) { for (int j = loCol; j <= hiCol; ++j) { b[i][j] = 0.25 * (a[i-1][j] + a[i][j-1] + a[i+1][j] + a[i][j+1]); md = Math.max(md, Math.abs(b[i][j] - a[i][j])); } } maxDiff = md; } } class Jacobi extends FJTask { static final double EPSILON = 0.001; // convergence criterion final JTree root; final int maxSteps; Jacobi(double[][] A, double[][] B, int firstRow, int lastRow, int firstCol, int lastCol, int maxSteps, int leafCells) { this.maxSteps = maxSteps; root = build(A, B, firstRow, lastRow, firstCol, lastCol, leafCells); } public void run() { for (int i = 0; i < maxSteps; ++i) { invoke(root); if (root.maxDiff < EPSILON) { System.out.println("Converged"); return; } else root.reset(); } } static JTree build(double[][] a, double[][] b, int lr, int hr, int lc, int hc, int size) { if ((hr - lr + 1) * (hc - lc + 1) <= size) return new Leaf(a, b, lr, hr, lc, hc); int mr = (lr + hr) / 2; // midpoints int mc = (lc + hc) / 2; return new Interior(build(a, b, lr, mr, lc, mc, size), build(a, b, lr, mr, mc+1, hc, size), build(a, b, mr+1, hr, lc, mc, size), build(a, b, mr+1, hr, mc+1, hc, size)); } } class CyclicBarrier { protected final int parties; protected int count; // parties currently being waited for protected int resets = 0; // times barrier has been tripped CyclicBarrier(int c) { count = parties = c; } synchronized int barrier() throws InterruptedException { int index = --count; if (index > 0) { // not yet tripped int r = resets; // wait until next reset do { wait(); } while (resets == r); } else { // trip count = parties; // reset count for next time ++resets; notifyAll(); // cause all other parties to resume } return index; } } class Segment implements Runnable { // Code sketch final CyclicBarrier bar; // shared by all segments Segment(CyclicBarrier b) { bar = b; } void update() { } public void run() { // ... try { for (int i = 0; i < 10 /* iterations */; ++i) { update(); bar.barrier(); } } catch (InterruptedException ie) {} // ... } } class Problem { int size; } class Driver { // ... int granularity = 1; void compute(Problem problem) throws Exception { int n = problem.size / granularity; CyclicBarrier barrier = new CyclicBarrier(n); Thread[] threads = new Thread[n]; // create for (int i = 0; i < n; ++i) threads[i] = new Thread(new Segment(barrier)); // trigger for (int i = 0; i < n; ++i) threads[i].start(); // await termination for (int i = 0; i < n; ++i) threads[i].join(); } } class JacobiSegment implements Runnable { // Incomplete // These are same as in Leaf class version: static final double EPSILON = 0.001; double[][] A; double[][] B; final int firstRow; final int lastRow; final int firstCol; final int lastCol; volatile double maxDiff; int steps = 0; void update() { /* Nearly same as Leaf.run */ } final CyclicBarrier bar; final JacobiSegment[] allSegments; // needed for convergence check volatile boolean converged = false; JacobiSegment(double[][] A, double[][] B, int firstRow, int lastRow, int firstCol, int lastCol, CyclicBarrier b, JacobiSegment[] allSegments) { this.A = A; this.B = B; this.firstRow = firstRow; this.lastRow = lastRow; this.firstCol = firstCol; this.lastCol = lastCol; this.bar = b; this.allSegments = allSegments; } public void run() { try { while (!converged) { update(); int myIndex = bar.barrier(); // wait for all to update if (myIndex == 0) convergenceCheck(); bar.barrier(); // wait for convergence check } } catch(Exception ex) { // clean up ... 
} } void convergenceCheck() { for (int i = 0; i < allSegments.length; ++i) if (allSegments[i].maxDiff > EPSILON) return; for (int i = 0; i < allSegments.length; ++i) allSegments[i].converged = true; } } class ActiveRunnableExecutor extends Thread { Channel me = null; // ... // used for all incoming messages public void run() { try { for (;;) { ((Runnable)(me.take())).run(); } } catch (InterruptedException ie) {} // die } } //import jcsp.lang.*; class Fork implements jcsp.lang.CSProcess { private final jcsp.lang.AltingChannelInput[] fromPhil; Fork(jcsp.lang.AltingChannelInput l, jcsp.lang.AltingChannelInput r) { fromPhil = new jcsp.lang.AltingChannelInput[] { l, r }; } public void run() { jcsp.lang.Alternative alt = new jcsp.lang.Alternative(fromPhil); for (;;) { int i = alt.select(); // await message from either fromPhil[i].read(); // pick up fromPhil[i].read(); // put down } } } class Butler implements jcsp.lang.CSProcess { private final jcsp.lang.AltingChannelInput[] enters; private final jcsp.lang.AltingChannelInput[] exits; Butler(jcsp.lang.AltingChannelInput[] e, jcsp.lang.AltingChannelInput[] x) { enters = e; exits = x; } public void run() { int seats = enters.length; int nseated = 0; // set up arrays for select jcsp.lang.AltingChannelInput[] chans = new jcsp.lang.AltingChannelInput[2*seats]; for (int i = 0; i < seats; ++i) { chans[i] = exits[i]; chans[seats + i] = enters[i]; } jcsp.lang.Alternative either = new jcsp.lang.Alternative(chans); jcsp.lang.Alternative exit = new jcsp.lang.Alternative(exits); for (;;) { // if max number are seated, only allow exits jcsp.lang.Alternative alt = (nseated < seats-1)? either : exit; int i = alt.fairSelect(); chans[i].read(); // if i is in first half of array, it is an exit message if (i < seats) --nseated; else ++nseated; } } } class Philosopher implements jcsp.lang.CSProcess { private final jcsp.lang.ChannelOutput leftFork; private final jcsp.lang.ChannelOutput rightFork; private final jcsp.lang.ChannelOutput enter; private final jcsp.lang.ChannelOutput exit; Philosopher(jcsp.lang.ChannelOutput l, jcsp.lang.ChannelOutput r, jcsp.lang.ChannelOutput e, jcsp.lang.ChannelOutput x) { leftFork = l; rightFork = r; enter = e; exit = x; } public void run() { for (;;) { think(); enter.write(null); // get seat leftFork.write(null); // pick up left rightFork.write(null); // pick up right eat(); leftFork.write(null); // put down left rightFork.write(null); // put down right exit.write(null); // leave seat } } private void eat() {} private void think() {} } class College implements jcsp.lang.CSProcess { final static int N = 5; private final jcsp.lang.CSProcess action; College() { jcsp.lang.One2OneChannel[] lefts = jcsp.lang.One2OneChannel.create(N); jcsp.lang.One2OneChannel[] rights = jcsp.lang.One2OneChannel.create(N); jcsp.lang.One2OneChannel[] enters = jcsp.lang.One2OneChannel.create(N); jcsp.lang.One2OneChannel[] exits = jcsp.lang.One2OneChannel.create(N); Butler butler = new Butler(enters, exits); Philosopher[] phils = new Philosopher[N]; for (int i = 0; i < N; ++i) phils[i] = new Philosopher(lefts[i], rights[i], enters[i], exits[i]); Fork[] forks = new Fork[N]; for (int i = 0; i < N; ++i) forks[i] = new Fork(rights[(i + 1) % N], lefts[i]); action = new jcsp.lang.Parallel( new jcsp.lang.CSProcess[] { butler, new jcsp.lang.Parallel(phils), new jcsp.lang.Parallel(forks) }); } public void run() { action.run(); } public static void main(String[] args) { new College().run(); } }<|fim▁end|>
}
<|file_name|>Redrawable.java<|end_file_name|><|fim▁begin|>package com.kartoflane.superluminal2.components.interfaces; import org.eclipse.swt.events.PaintEvent; import org.eclipse.swt.events.PaintListener;<|fim▁hole|> public interface Redrawable extends PaintListener { /** * Calls paintControl if the object is visible. */ public void redraw( PaintEvent e ); }<|fim▁end|>
<|file_name|>take.rs<|end_file_name|><|fim▁begin|>use {Buf}; use std::cmp; /// A `Buf` adapter which limits the bytes read from an underlying buffer. /// /// This struct is generally created by calling `take()` on `Buf`. See /// documentation of [`take()`](trait.Buf.html#method.take) for more details. #[derive(Debug)] pub struct Take<T> { inner: T, limit: usize, } pub fn new<T>(inner: T, limit: usize) -> Take<T> { Take { inner: inner, limit: limit, } } impl<T> Take<T> { /// Consumes this `Take`, returning the underlying value. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// let mut buf = buf.into_inner(); /// /// dst.clear(); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo world"[..]); /// ``` pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// /// assert_eq!(0, buf.get_ref().position()); /// ``` pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying `Buf`. /// /// It is inadvisable to directly read from the underlying `Buf`. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// buf.get_mut().set_position(2); /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"ll"[..]); /// ``` pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than indicated by this method then /// that is the actual number of available bytes. /// /// # Examples /// /// ```rust /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// /// assert_eq!(2, buf.limit()); /// assert_eq!(b'h', buf.get_u8()); /// assert_eq!(1, buf.limit()); /// ``` pub fn limit(&self) -> usize { self.limit } /// Sets the maximum number of bytes that can be read. /// /// # Note /// /// If the inner `Buf` has fewer bytes than `lim` then that is the actual /// number of available bytes. /// /// # Examples /// /// ```rust /// use bytes::{Buf, BufMut}; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"hello world").take(2); /// let mut dst = vec![]; /// /// dst.put(&mut buf); /// assert_eq!(*dst, b"he"[..]); /// /// dst.clear(); /// /// buf.set_limit(3); /// dst.put(&mut buf); /// assert_eq!(*dst, b"llo"[..]); /// ```<|fim▁hole|> } } impl<T: Buf> Buf for Take<T> { fn remaining(&self) -> usize { cmp::min(self.inner.remaining(), self.limit) } fn bytes(&self) -> &[u8] { let bytes = self.inner.bytes(); &bytes[..cmp::min(bytes.len(), self.limit)] } fn advance(&mut self, cnt: usize) { assert!(cnt <= self.limit); self.inner.advance(cnt); self.limit -= cnt; } }<|fim▁end|>
pub fn set_limit(&mut self, lim: usize) { self.limit = lim
<|file_name|>20170311204915-create-choice.js<|end_file_name|><|fim▁begin|>'use strict'; module.exports = { up: function(queryInterface, Sequelize) { return queryInterface.createTable('Choices', { id: { allowNull: false, autoIncrement: true, primaryKey: true, type: Sequelize.INTEGER }, QuestionId: { type: Sequelize.UUID }, text: { type: Sequelize.TEXT }, createdAt: { allowNull: false, type: Sequelize.DATE }, updatedAt: { allowNull: false, type: Sequelize.DATE } }); }, down: function(queryInterface, Sequelize) {<|fim▁hole|><|fim▁end|>
return queryInterface.dropTable('Choices'); } };
<|file_name|>AddonManager_ko.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS> <TS version="1.1" language="ko" sourcelanguage="en"> <context> <name>AddonInstaller</name> <message><|fim▁hole|> <source>Installed location</source> <translation>설치 위치</translation> </message> </context> <context> <name>AddonsInstaller</name> <message> <location filename="addonmanager_macro.py" line="157"/> <source>Unable to fetch the code of this macro.</source> <translation>이 매크로의 코드를 가져올 수 없습니다.</translation> </message> <message> <location filename="addonmanager_macro.py" line="164"/> <source>Unable to retrieve a description for this macro.</source> <translation type="unfinished">Unable to retrieve a description for this macro.</translation> </message> <message> <location filename="AddonManager.py" line="86"/> <source>The addons that can be installed here are not officially part of FreeCAD, and are not reviewed by the FreeCAD team. Make sure you know what you are installing!</source> <translation type="unfinished">The addons that can be installed here are not officially part of FreeCAD, and are not reviewed by the FreeCAD team. Make sure you know what you are installing!</translation> </message> <message> <location filename="AddonManager.py" line="199"/> <source>Addon manager</source> <translation>추가 기능 관리</translation> </message> <message> <location filename="AddonManager.py" line="204"/> <source>You must restart FreeCAD for changes to take effect. Press Ok to restart FreeCAD now, or Cancel to restart later.</source> <translation>추가기능을 사용하려면 FreeCAD를 재시작해야 합니다. 지금 바로 재시작하려면 OK버튼, 나중에 재시작하려면 취소버튼을 누르세요.</translation> </message> <message> <location filename="AddonManager.py" line="243"/> <source>Checking for updates...</source> <translation>업데이트 확인 중...</translation> </message> <message> <location filename="AddonManager.py" line="262"/> <source>Apply</source> <translation>적용</translation> </message> <message> <location filename="AddonManager.py" line="263"/> <source>update(s)</source> <translation>업데이트</translation> </message> <message> <location filename="AddonManager.py" line="266"/> <source>No update available</source> <translation>사용가능한 업데이트가 없습니다.</translation> </message> <message> <location filename="AddonManager.py" line="433"/> <source>Macro successfully installed. The macro is now available from the Macros dialog.</source> <translation>매크로가 설치되었습니다. 이제 매크로 창을 열어 사용할 수 있습니다.</translation> </message> <message> <location filename="AddonManager.py" line="435"/> <source>Unable to install</source> <translation>설치할 수 없습니다.</translation> </message> <message> <location filename="AddonManager.py" line="494"/> <source>Addon successfully removed. Please restart FreeCAD</source> <translation>추가기능이 제거되었습니다. 
FreeCAD를 다시 시작 해주세요.</translation> </message> <message> <location filename="AddonManager.py" line="496"/> <source>Unable to remove this addon</source> <translation>이 추가기능을 제거할 수 없습니다.</translation> </message> <message> <location filename="AddonManager.py" line="502"/> <source>Macro successfully removed.</source> <translation>매크로가 제거되었습니다.</translation> </message> <message> <location filename="AddonManager.py" line="504"/> <source>Macro could not be removed.</source> <translation>매크로를 제거할 수 없습니다.</translation> </message> <message> <location filename="addonmanager_workers.py" line="167"/> <source>Unable to download addon list.</source> <translation type="unfinished">Unable to download addon list.</translation> </message> <message> <location filename="addonmanager_workers.py" line="172"/> <source>Workbenches list was updated.</source> <translation>작업대 목록이 갱신되었습니다.</translation> </message> <message> <location filename="addonmanager_workers.py" line="734"/> <source>Outdated GitPython detected, consider upgrading with pip.</source> <translation type="unfinished">Outdated GitPython detected, consider upgrading with pip.</translation> </message> <message> <location filename="addonmanager_workers.py" line="292"/> <source>List of macros successfully retrieved.</source> <translation type="unfinished">List of macros successfully retrieved.</translation> </message> <message> <location filename="addonmanager_workers.py" line="647"/> <source>Retrieving description...</source> <translation type="unfinished">Retrieving description...</translation> </message> <message> <location filename="addonmanager_workers.py" line="387"/> <source>Retrieving info from</source> <translation type="unfinished">Retrieving info from</translation> </message> <message> <location filename="addonmanager_workers.py" line="529"/> <source>An update is available for this addon.</source> <translation>이 추가기능을 업데이트 할 수 있습니다.</translation> </message> <message> <location filename="addonmanager_workers.py" line="517"/> <source>This addon is already installed.</source> <translation>이 추가기능은 이미 설치되어 있습니다.</translation> </message> <message> <location filename="addonmanager_workers.py" line="649"/> <source>Retrieving info from git</source> <translation type="unfinished">Retrieving info from git</translation> </message> <message> <location filename="addonmanager_workers.py" line="652"/> <source>Retrieving info from wiki</source> <translation type="unfinished">Retrieving info from wiki</translation> </message> <message> <location filename="addonmanager_workers.py" line="696"/> <source>GitPython not found. Using standard download instead.</source> <translation type="unfinished">GitPython not found. Using standard download instead.</translation> </message> <message> <location filename="addonmanager_workers.py" line="701"/> <source>Your version of python doesn&apos;t appear to support ZIP files. Unable to proceed.</source> <translation type="unfinished">Your version of python doesn&apos;t appear to support ZIP files. Unable to proceed.</translation> </message> <message> <location filename="addonmanager_workers.py" line="782"/> <source>Workbench successfully installed. 
Please restart FreeCAD to apply the changes.</source> <translation>설치완료.새로 추가한 작업대를 사용하려면 FreeCAD를 다시 시작하세요.</translation> </message> <message> <location filename="addonmanager_workers.py" line="831"/> <source>Missing workbench</source> <translation>작업대 누락</translation> </message> <message> <location filename="addonmanager_workers.py" line="840"/> <source>Missing python module</source> <translation type="unfinished">Missing python module</translation> </message> <message> <location filename="addonmanager_workers.py" line="850"/> <source>Missing optional python module (doesn&apos;t prevent installing)</source> <translation type="unfinished">Missing optional python module (doesn&apos;t prevent installing)</translation> </message> <message> <location filename="addonmanager_workers.py" line="853"/> <source>Some errors were found that prevent to install this workbench</source> <translation type="unfinished">Some errors were found that prevent to install this workbench</translation> </message> <message> <location filename="addonmanager_workers.py" line="855"/> <source>Please install the missing components first.</source> <translation type="unfinished">Please install the missing components first.</translation> </message> <message> <location filename="addonmanager_workers.py" line="876"/> <source>Error: Unable to download</source> <translation>오류: 내려받을 수 없음</translation> </message> <message> <location filename="addonmanager_workers.py" line="889"/> <source>Successfully installed</source> <translation>설치 완료</translation> </message> <message> <location filename="addonmanager_workers.py" line="306"/> <source>GitPython not installed! Cannot retrieve macros from git</source> <translation type="unfinished">GitPython not installed! Cannot retrieve macros from git</translation> </message> <message> <location filename="AddonManager.py" line="567"/> <source>Installed</source> <translation>설치됨</translation> </message> <message> <location filename="AddonManager.py" line="586"/> <source>Update available</source> <translation>사용 가능한 업데이트가 있습니다</translation> </message> <message> <location filename="AddonManager.py" line="542"/> <source>Restart required</source> <translation>다시 시작 필요</translation> </message> <message> <location filename="addonmanager_workers.py" line="661"/> <source>This macro is already installed.</source> <translation>이 매크로는 이미 설치되어 있습니다.</translation> </message> <message> <location filename="addonmanager_workers.py" line="795"/> <source>A macro has been installed and is available under Macro -&gt; Macros menu</source> <translation type="unfinished">A macro has been installed and is available under Macro -&gt; Macros menu</translation> </message> <message> <location filename="addonmanager_workers.py" line="543"/> <source>This addon is marked as obsolete</source> <translation type="unfinished">This addon is marked as obsolete</translation> </message> <message> <location filename="addonmanager_workers.py" line="547"/> <source>This usually means it is no longer maintained, and some more advanced addon in this list provides the same functionality.</source> <translation type="unfinished">This usually means it is no longer maintained, and some more advanced addon in this list provides the same functionality.</translation> </message> <message> <location filename="addonmanager_workers.py" line="869"/> <source>Error: Unable to locate zip from</source> <translation type="unfinished">Error: Unable to locate zip from</translation> </message> <message> <location filename="addonmanager_workers.py" 
line="315"/> <source>Something went wrong with the Git Macro Retrieval, possibly the Git executable is not in the path</source> <translation type="unfinished">Something went wrong with the Git Macro Retrieval, possibly the Git executable is not in the path</translation> </message> <message> <location filename="addonmanager_workers.py" line="555"/> <source>This addon is marked as Python 2 Only</source> <translation type="unfinished">This addon is marked as Python 2 Only</translation> </message> <message> <location filename="addonmanager_workers.py" line="560"/> <source>This workbench may no longer be maintained and installing it on a Python 3 system will more than likely result in errors at startup or while in use.</source> <translation type="unfinished">This workbench may no longer be maintained and installing it on a Python 3 system will more than likely result in errors at startup or while in use.</translation> </message> <message> <location filename="addonmanager_workers.py" line="723"/> <source>User requested updating a Python 2 workbench on a system running Python 3 - </source> <translation type="unfinished">User requested updating a Python 2 workbench on a system running Python 3 - </translation> </message> <message> <location filename="addonmanager_workers.py" line="759"/> <source>Workbench successfully updated. Please restart FreeCAD to apply the changes.</source> <translation type="unfinished">Workbench successfully updated. Please restart FreeCAD to apply the changes.</translation> </message> <message> <location filename="addonmanager_workers.py" line="767"/> <source>User requested installing a Python 2 workbench on a system running Python 3 - </source> <translation type="unfinished">User requested installing a Python 2 workbench on a system running Python 3 - </translation> </message> <message> <location filename="addonmanager_workers.py" line="339"/> <source>Appears to be an issue connecting to the Wiki, therefore cannot retrieve Wiki macro list at this time</source> <translation type="unfinished">Appears to be an issue connecting to the Wiki, therefore cannot retrieve Wiki macro list at this time</translation> </message> <message> <location filename="addonmanager_workers.py" line="429"/> <source>Raw markdown displayed</source> <translation type="unfinished">Raw markdown displayed</translation> </message> <message> <location filename="addonmanager_workers.py" line="431"/> <source>Python Markdown library is missing.</source> <translation type="unfinished">Python Markdown library is missing.</translation> </message> </context> <context> <name>Dialog</name> <message> <location filename="AddonManager.ui" line="37"/> <source>Workbenches</source> <translation>워크벤치</translation> </message> <message> <location filename="AddonManager.ui" line="47"/> <source>Macros</source> <translation>매크로</translation> </message> <message> <location filename="AddonManager.ui" line="59"/> <source>Execute</source> <translation>실행</translation> </message> <message> <location filename="AddonManager.ui" line="113"/> <source>Downloading info...</source> <translation type="unfinished">Downloading info...</translation> </message> <message> <location filename="AddonManager.ui" line="150"/> <source>Update all</source> <translation>모두 업데이트</translation> </message> <message> <location filename="AddonManager.ui" line="56"/> <source>Executes the selected macro, if installed</source> <translation type="unfinished">Executes the selected macro, if installed</translation> </message> <message> <location 
filename="AddonManager.ui" line="127"/> <source>Uninstalls a selected macro or workbench</source> <translation>선택한 매크로 또는 작업대 제거</translation> </message> <message> <location filename="AddonManager.ui" line="137"/> <source>Installs or updates the selected macro or workbench</source> <translation>선택된 매크로/작업대 설치 또는 업데이트</translation> </message> <message> <location filename="AddonManager.ui" line="147"/> <source>Download and apply all available updates</source> <translation>가능한 업데이트를 내려받고 적용</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="35"/> <source>Custom repositories (one per line):</source> <translation type="unfinished">Custom repositories (one per line):</translation> </message> <message> <location filename="AddonManager.ui" line="89"/> <source>Sets configuration options for the Addon Manager</source> <translation type="unfinished">Sets configuration options for the Addon Manager</translation> </message> <message> <location filename="AddonManager.ui" line="92"/> <source>Configure...</source> <translation type="unfinished">Configure...</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="14"/> <source>Addon manager options</source> <translation>추가 관리 옵션</translation> </message> <message> <location filename="AddonManager.ui" line="130"/> <source>Uninstall selected</source> <translation>선택된 항목 제거</translation> </message> <message> <location filename="AddonManager.ui" line="140"/> <source>Install/update selected</source> <translation type="unfinished">Install/update selected</translation> </message> <message> <location filename="AddonManager.ui" line="160"/> <source>Close</source> <translation>닫기</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="20"/> <source>If this option is selected, when launching the Addon Manager, installed addons will be checked for available updates (this requires the GitPython package installed on your system)</source> <translation type="unfinished">If this option is selected, when launching the Addon Manager, installed addons will be checked for available updates (this requires the GitPython package installed on your system)</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="25"/> <source>Automatically check for updates at start (requires GitPython)</source> <translation type="unfinished">Automatically check for updates at start (requires GitPython)</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="57"/> <source>Proxy </source> <translation type="unfinished">Proxy </translation> </message> <message> <location filename="AddonManagerOptions.ui" line="64"/> <source>No proxy</source> <translation type="unfinished">No proxy</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="71"/> <source>User system proxy</source> <translation type="unfinished">User system proxy</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="78"/> <source>User defined proxy :</source> <translation type="unfinished">User defined proxy :</translation> </message> <message> <location filename="AddonManager.ui" line="14"/> <source>Addon Manager</source> <translation type="unfinished">Addon Manager</translation> </message> <message> <location filename="AddonManager.ui" line="157"/> <source>Close the Addon Manager</source> <translation type="unfinished">Close the Addon Manager</translation> </message> <message> <location filename="AddonManagerOptions.ui" line="42"/> 
<source>You can use this window to specify additional addon repositories to be scanned for available addons</source> <translation type="unfinished">You can use this window to specify additional addon repositories to be scanned for available addons</translation> </message> </context> <context> <name>Std_AddonMgr</name> <message> <location filename="AddonManager.py" line="68"/> <source>&amp;Addon manager</source> <translation type="unfinished">&amp;Addon manager</translation> </message> <message> <location filename="AddonManager.py" line="69"/> <source>Manage external workbenches and macros</source> <translation type="unfinished">Manage external workbenches and macros</translation> </message> </context> </TS><|fim▁end|>
<location filename="addonmanager_workers.py" line="531"/>
<|file_name|>struct_destructuring_cross_crate.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed<|fim▁hole|>pub struct S { pub x: int, pub y: int, }<|fim▁end|>
// except according to those terms. #[crate_type="lib"];
<|file_name|>register.go<|end_file_name|><|fim▁begin|>/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at<|fim▁hole|> http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package authentication import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/api" ) // GroupName is the group name use in this package const GroupName = "authentication.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme ) func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &api.ListOptions{}, &api.DeleteOptions{}, &metav1.ExportOptions{}, &metav1.GetOptions{}, &TokenReview{}, ) return nil }<|fim▁end|>
<|file_name|>multipart_parser.cc<|end_file_name|><|fim▁begin|>// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/fetch/multipart_parser.h" #include "base/cxx17_backports.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/renderer/platform/network/http_names.h" #include "third_party/blink/renderer/platform/network/http_parsers.h" #include "third_party/blink/renderer/platform/wtf/std_lib_extras.h" #include <algorithm> #include <utility> namespace blink { namespace { constexpr char kCloseDelimiterSuffix[] = "--\r\n"; constexpr size_t kCloseDelimiterSuffixSize = base::size(kCloseDelimiterSuffix) - 1u; constexpr size_t kDashBoundaryOffset = 2u; // The length of "\r\n". constexpr char kDelimiterSuffix[] = "\r\n"; constexpr size_t kDelimiterSuffixSize = base::size(kDelimiterSuffix) - 1u; } // namespace MultipartParser::Matcher::Matcher() = default; MultipartParser::Matcher::Matcher(const char* data, size_t num_matched_bytes, size_t size) : data_(data), num_matched_bytes_(num_matched_bytes), size_(size) {} bool MultipartParser::Matcher::Match(const char* first, const char* last) { while (first < last) { if (!Match(*first++)) return false; } return true; } void MultipartParser::Matcher::SetNumMatchedBytes(size_t num_matched_bytes) { DCHECK_LE(num_matched_bytes, size_); num_matched_bytes_ = num_matched_bytes; } MultipartParser::MultipartParser(Vector<char> boundary, Client* client) : client_(client), delimiter_(std::move(boundary)), state_(State::kParsingPreamble) { // The delimiter consists of "\r\n" and a dash boundary which consists of // "--" and a boundary. delimiter_.push_front("\r\n--", 4u); matcher_ = DelimiterMatcher(kDashBoundaryOffset); } bool MultipartParser::AppendData(const char* bytes, size_t size) { DCHECK_NE(State::kFinished, state_); DCHECK_NE(State::kCancelled, state_); const char* const bytes_end = bytes + size; while (bytes < bytes_end) { switch (state_) { case State::kParsingPreamble: // Parse either a preamble and a delimiter or a dash boundary. ParseDelimiter(&bytes, bytes_end); if (!matcher_.IsMatchComplete() && bytes < bytes_end) { // Parse a preamble data (by ignoring it) and then a delimiter. matcher_.SetNumMatchedBytes(0u); ParseDataAndDelimiter(&bytes, bytes_end); } if (matcher_.IsMatchComplete()) { // Prepare for a delimiter suffix. matcher_ = DelimiterSuffixMatcher(); state_ = State::kParsingDelimiterSuffix; } break; case State::kParsingDelimiterSuffix: // Parse transport padding and "\r\n" after a delimiter. // This state can be reached after either a preamble or part // octets are parsed. if (matcher_.NumMatchedBytes() == 0u) ParseTransportPadding(&bytes, bytes_end); while (bytes < bytes_end) { if (!matcher_.Match(*bytes++)) return false; if (matcher_.IsMatchComplete()) { // Prepare for part header fields. state_ = State::kParsingPartHeaderFields; break; } } break; case State::kParsingPartHeaderFields: { // Parse part header fields (which ends with "\r\n") and an empty // line (which also ends with "\r\n"). // This state can be reached after a delimiter and a delimiter // suffix after either a preamble or part octets are parsed. HTTPHeaderMap header_fields; if (ParseHeaderFields(&bytes, bytes_end, &header_fields)) { // Prepare for part octets. 
matcher_ = DelimiterMatcher(); state_ = State::kParsingPartOctets; client_->PartHeaderFieldsInMultipartReceived(header_fields); } break; } case State::kParsingPartOctets: { // Parse part octets and a delimiter. // This state can be reached only after part header fields are // parsed. const size_t num_initially_matched_bytes = matcher_.NumMatchedBytes(); const char* octets_begin = bytes; ParseDelimiter(&bytes, bytes_end); if (!matcher_.IsMatchComplete() && bytes < bytes_end) { if (matcher_.NumMatchedBytes() >= num_initially_matched_bytes && num_initially_matched_bytes > 0u) { // Since the matched bytes did not form a complete // delimiter, the matched bytes turned out to be octet // bytes instead of being delimiter bytes. Additionally, // some of the matched bytes are from the previous call and // are therefore not in the range [octetsBegin, bytesEnd[. client_->PartDataInMultipartReceived(matcher_.Data(), matcher_.NumMatchedBytes()); if (state_ != State::kParsingPartOctets) break; octets_begin = bytes; } matcher_.SetNumMatchedBytes(0u); ParseDataAndDelimiter(&bytes, bytes_end); const char* const octets_end = bytes - matcher_.NumMatchedBytes(); if (octets_begin < octets_end) { client_->PartDataInMultipartReceived( octets_begin, static_cast<size_t>(octets_end - octets_begin)); if (state_ != State::kParsingPartOctets) break; } } if (matcher_.IsMatchComplete()) { state_ = State::kParsingDelimiterOrCloseDelimiterSuffix; client_->PartDataInMultipartFullyReceived(); } break; } <|fim▁hole|> case State::kParsingDelimiterOrCloseDelimiterSuffix: // Determine whether this is a delimiter suffix or a close // delimiter suffix. // This state can be reached only after part octets are parsed. if (*bytes == '-') { // Prepare for a close delimiter suffix. matcher_ = CloseDelimiterSuffixMatcher(); state_ = State::kParsingCloseDelimiterSuffix; } else { // Prepare for a delimiter suffix. matcher_ = DelimiterSuffixMatcher(); state_ = State::kParsingDelimiterSuffix; } break; case State::kParsingCloseDelimiterSuffix: // Parse "--", transport padding and "\r\n" after a delimiter // (a delimiter and "--" constitute a close delimiter). // This state can be reached only after part octets are parsed. for (;;) { if (matcher_.NumMatchedBytes() == 2u) ParseTransportPadding(&bytes, bytes_end); if (bytes >= bytes_end) break; if (!matcher_.Match(*bytes++)) return false; if (matcher_.IsMatchComplete()) { // Prepare for an epilogue. state_ = State::kParsingEpilogue; break; } } break; case State::kParsingEpilogue: // Parse an epilogue (by ignoring it). // This state can be reached only after a delimiter and a close // delimiter suffix after part octets are parsed. return true; case State::kCancelled: case State::kFinished: // The client changed the state. return false; } } DCHECK_EQ(bytes_end, bytes); return true; } void MultipartParser::Cancel() { state_ = State::kCancelled; } bool MultipartParser::Finish() { DCHECK_NE(State::kCancelled, state_); DCHECK_NE(State::kFinished, state_); const State initial_state = state_; state_ = State::kFinished; switch (initial_state) { case State::kParsingPartOctets: if (matcher_.NumMatchedBytes() > 0u) { // Since the matched bytes did not form a complete delimiter, // the matched bytes turned out to be octet bytes instead of being // delimiter bytes. 
client_->PartDataInMultipartReceived(matcher_.Data(), matcher_.NumMatchedBytes()); } return false; case State::kParsingCloseDelimiterSuffix: // Require a full close delimiter consisting of a delimiter and "--" // but ignore missing or partial "\r\n" after that. return matcher_.NumMatchedBytes() >= 2u; case State::kParsingEpilogue: return true; default: return false; } } MultipartParser::Matcher MultipartParser::CloseDelimiterSuffixMatcher() const { return Matcher(kCloseDelimiterSuffix, 0u, kCloseDelimiterSuffixSize); } MultipartParser::Matcher MultipartParser::DelimiterMatcher( size_t num_already_matched_bytes) const { return Matcher(delimiter_.data(), num_already_matched_bytes, delimiter_.size()); } MultipartParser::Matcher MultipartParser::DelimiterSuffixMatcher() const { return Matcher(kDelimiterSuffix, 0u, kDelimiterSuffixSize); } void MultipartParser::ParseDataAndDelimiter(const char** bytes_pointer, const char* bytes_end) { DCHECK_EQ(0u, matcher_.NumMatchedBytes()); // Search for a complete delimiter within the bytes. const char* delimiter_begin = std::search( *bytes_pointer, bytes_end, delimiter_.begin(), delimiter_.end()); if (delimiter_begin != bytes_end) { // A complete delimiter was found. The bytes before that are octet // bytes. const char* const delimiter_end = delimiter_begin + delimiter_.size(); const bool matched = matcher_.Match(delimiter_begin, delimiter_end); DCHECK(matched); DCHECK(matcher_.IsMatchComplete()); *bytes_pointer = delimiter_end; } else { // Search for a partial delimiter in the end of the bytes. const size_t size = static_cast<size_t>(bytes_end - *bytes_pointer); for (delimiter_begin = bytes_end - std::min(static_cast<size_t>(delimiter_.size() - 1u), size); delimiter_begin < bytes_end; ++delimiter_begin) { if (matcher_.Match(delimiter_begin, bytes_end)) break; matcher_.SetNumMatchedBytes(0u); } // If a partial delimiter was found in the end of bytes, the bytes // before the partial delimiter are definitely octets bytes and // the partial delimiter bytes are buffered for now. // If a partial delimiter was not found in the end of bytes, all bytes // are definitely octets bytes. // In all cases, all bytes are parsed now. *bytes_pointer = bytes_end; } DCHECK(matcher_.IsMatchComplete() || *bytes_pointer == bytes_end); } void MultipartParser::ParseDelimiter(const char** bytes_pointer, const char* bytes_end) { DCHECK(!matcher_.IsMatchComplete()); while (*bytes_pointer < bytes_end && matcher_.Match(*(*bytes_pointer))) { ++(*bytes_pointer); if (matcher_.IsMatchComplete()) break; } } bool MultipartParser::ParseHeaderFields(const char** bytes_pointer, const char* bytes_end, HTTPHeaderMap* header_fields) { // Combine the current bytes with buffered header bytes if needed. const char* header_bytes = *bytes_pointer; if ((bytes_end - *bytes_pointer) > std::numeric_limits<wtf_size_t>::max()) return false; wtf_size_t header_size = static_cast<wtf_size_t>(bytes_end - *bytes_pointer); if (!buffered_header_bytes_.IsEmpty()) { buffered_header_bytes_.Append(header_bytes, header_size); header_bytes = buffered_header_bytes_.data(); header_size = buffered_header_bytes_.size(); } wtf_size_t end = 0u; if (!ParseMultipartFormHeadersFromBody(header_bytes, header_size, header_fields, &end)) { // Store the current header bytes for the next call unless that has // already been done. 
if (buffered_header_bytes_.IsEmpty()) buffered_header_bytes_.Append(header_bytes, header_size); *bytes_pointer = bytes_end; return false; } buffered_header_bytes_.clear(); *bytes_pointer = bytes_end - (header_size - end); return true; } void MultipartParser::ParseTransportPadding(const char** bytes_pointer, const char* bytes_end) const { while (*bytes_pointer < bytes_end && (*(*bytes_pointer) == '\t' || *(*bytes_pointer) == ' ')) ++(*bytes_pointer); } void MultipartParser::Trace(Visitor* visitor) const { visitor->Trace(client_); } } // namespace blink<|fim▁end|>
<|file_name|>OpsDeviceManager.py<|end_file_name|><|fim▁begin|>""" config of ops service, Control by OPS devices @author: opsdev """ import httplib import json from OpsServiceConfig import OPS_SERVICE_CONFIG class OpsDeviceManager(): def __init__(self,config): self.server = config["server"] self.port = config["port"] self.addAction = "POST" self.getAction = "GET" self.modifyAction = "PUT" self.deleteAction = "DELETE" def addDevice(self,data): return self.restcall("/devices", data, self.addAction) def delDevice(self,deviceid): path = "/devices/%s" % (deviceid) return self.restcall(path, {}, self.deleteAction) def modifyDevice(self,data): return self.restcall("/devices", data, self.modifyAction) def getDevice(self,deviceid): path = "/devices/%s" % (deviceid) return self.restcall(path, "", self.getAction) def getDevices(self): return self.restcall("/devices", "", self.getAction) def restcall(self, opspath, data, action): conn = httplib.HTTPConnection(self.server, self.port)<|fim▁hole|> conn.request(action, opspath, body) elif action == self.getAction: conn.request(action, opspath) elif action == self.modifyAction: conn.request(action, opspath, body) elif action == self.deleteAction: conn.request(action, opspath, body) response = conn.getresponse() ret = (response.status, response.reason, response.read()) conn.close() return ret<|fim▁end|>
body = json.dumps(data) if action == self.addAction:
<|file_name|>attr_invalid2.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 Robert Clipsham <[email protected]> // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(custom_attribute, plugin)] #![plugin(nopanic)]<|fim▁hole|>fn main() {}<|fim▁end|>
#[nopanic = "something"] //~ ERROR: #[nopanic] does not have arguments or values pub fn foo() {}
<|file_name|>validation_job.py<|end_file_name|><|fim▁begin|>''' Created on Mar 19, 2014 @author: Simon ''' from engine.engine_job import EngineJob class ValidationJob(EngineJob): ''' M/R job for validating a trained model. ''' def mapper(self, key, values): data_processor = self.get_data_processor() data_processor.set_data(values) data_processor.normalize_data(self.data_handler.get_statistics()) data_set = data_processor.get_data_set() alg = self.get_trained_alg() validator = self.get_validator() yield 'validation', validator.validate(alg, data_set) def reducer(self, key, values):<|fim▁hole|> yield key, self.get_validator().aggregate(vals) if __name__ == '__main__': ValidationJob.run()<|fim▁end|>
vals = list(values)
<|file_name|>setup_package.py<|end_file_name|><|fim▁begin|>from distutils.core import Extension from collections import defaultdict def get_extensions(): import numpy as np exts = []<|fim▁hole|> mac_incl_path = "/usr/include/malloc" cfg = defaultdict(list) cfg['include_dirs'].append(np.get_include()) cfg['include_dirs'].append(mac_incl_path) cfg['include_dirs'].append('gala/potential') cfg['extra_compile_args'].append('--std=gnu99') cfg['sources'].append('gala/integrate/cyintegrators/leapfrog.pyx') cfg['sources'].append('gala/potential/potential/src/cpotential.c') exts.append(Extension('gala.integrate.cyintegrators.leapfrog', **cfg)) cfg = defaultdict(list) cfg['include_dirs'].append(np.get_include()) cfg['include_dirs'].append(mac_incl_path) cfg['include_dirs'].append('gala/potential') cfg['extra_compile_args'].append('--std=gnu99') cfg['sources'].append('gala/potential/hamiltonian/src/chamiltonian.c') cfg['sources'].append('gala/potential/potential/src/cpotential.c') cfg['sources'].append('gala/integrate/cyintegrators/dop853.pyx') cfg['sources'].append('gala/integrate/cyintegrators/dopri/dop853.c') exts.append(Extension('gala.integrate.cyintegrators.dop853', **cfg)) cfg = defaultdict(list) cfg['include_dirs'].append(np.get_include()) cfg['include_dirs'].append(mac_incl_path) cfg['include_dirs'].append('gala/potential') cfg['extra_compile_args'].append('--std=gnu99') cfg['sources'].append('gala/integrate/cyintegrators/ruth4.pyx') cfg['sources'].append('gala/potential/potential/src/cpotential.c') exts.append(Extension('gala.integrate.cyintegrators.ruth4', **cfg)) return exts<|fim▁end|>
# malloc
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import ( scoped_session, sessionmaker, ) from zope.sqlalchemy import ZopeTransactionExtension<|fim▁hole|>from handlers.index import IndexHandler from handlers.sensors import SensorsHandler import logging logging.getLogger().setLevel(logging.DEBUG) app = tornado.web.Application([ (r'/', IndexHandler), (r'/sensors', SensorsHandler) ]) DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())) Base = declarative_base()<|fim▁end|>
import tornado.web
<|file_name|>qgserrordialog.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************** qgserrordialog.cpp - error description ------------------- begin : October 2012 copyright : (C) October 2012 Radim Blazek email : radim dot blazek at gmail dot com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgserrordialog.h" #include <QMessageBox> #include <QSettings> QgsErrorDialog::QgsErrorDialog( const QgsError & theError, const QString & theTitle, QWidget *parent, const Qt::WindowFlags& fl ) : QDialog( parent, fl ) , mError( theError ) { setupUi( this ); QString title = theTitle; if ( title.isEmpty() ) title = tr( "Error" ); setWindowTitle( title ); // QMessageBox has static standardIcon( Icon icon ), but it is marked as obsolete QMessageBox messageBox( QMessageBox::Critical, "", "" ); mIconLabel->setPixmap( messageBox.iconPixmap() ); mSummaryTextBrowser->setOpenExternalLinks( true ); mDetailTextBrowser->setOpenExternalLinks( true ); mDetailTextBrowser->hide(); QPalette p = palette(); p.setColor( QPalette::Base, Qt::transparent ); mSummaryTextBrowser->setPalette( p ); mDetailCheckBox->hide(); mSummaryTextBrowser->setText( mError.summary() ); mDetailTextBrowser->setText( mError.message( QgsErrorMessage::Html ) ); resize( width(), 150 ); QSettings settings; Qt::CheckState state = ( Qt::CheckState ) settings.value( "/Error/dialog/detail", 0 ).toInt(); mDetailCheckBox->setCheckState( state ); if ( state == Qt::Checked ) on_mDetailPushButton_clicked(); } <|fim▁hole|> void QgsErrorDialog::show( const QgsError & theError, const QString & theTitle, QWidget *parent, const Qt::WindowFlags& fl ) { QgsErrorDialog d( theError, theTitle, parent, fl ); d.exec(); } void QgsErrorDialog::on_mDetailPushButton_clicked() { mSummaryTextBrowser->hide(); mDetailTextBrowser->show(); mDetailCheckBox->show(); mDetailPushButton->hide(); resize( width(), 400 ); } void QgsErrorDialog::on_mDetailCheckBox_stateChanged( int state ) { QSettings settings; settings.setValue( "/Error/dialog/detail", state ); }<|fim▁end|>
QgsErrorDialog::~QgsErrorDialog() { }
<|file_name|>RskAddress.java<|end_file_name|><|fim▁begin|>/* * This file is part of RskJ * Copyright (C) 2017 RSK Labs Ltd. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package co.rsk.core; import com.google.common.primitives.UnsignedBytes; import org.ethereum.rpc.TypeConverter; import org.ethereum.util.ByteUtil; import org.ethereum.vm.DataWord; import java.util.Arrays; import java.util.Comparator; /** * Immutable representation of an RSK address. * It is a simple wrapper on the raw byte[]. * * @author Ariel Mendelzon */ public class RskAddress { /** * This is the size of an RSK address in bytes. */ public static final int LENGTH_IN_BYTES = 20; private static final RskAddress NULL_ADDRESS = new RskAddress(); /** * This compares using the lexicographical order of the sender unsigned bytes. */ public static final Comparator<RskAddress> LEXICOGRAPHICAL_COMPARATOR = Comparator.comparing( RskAddress::getBytes, UnsignedBytes.lexicographicalComparator()); private final byte[] bytes; /** * @param address a data word containing an address in the last 20 bytes. */ public RskAddress(DataWord address) { this(address.getLast20Bytes()); } /** * @param address the hex-encoded 20 bytes long address, with or without 0x prefix. */ public RskAddress(String address) { this(TypeConverter.stringHexToByteArray(address)); } /** * @param bytes the 20 bytes long raw address bytes. */ public RskAddress(byte[] bytes) { if (bytes.length != LENGTH_IN_BYTES) { throw new RuntimeException(String.format("An RSK address must be %d bytes long", LENGTH_IN_BYTES)); } this.bytes = bytes; } /** * This instantiates the contract creation address. */ private RskAddress() { this.bytes = new byte[0]; } /**<|fim▁hole|> public static RskAddress nullAddress() { return NULL_ADDRESS; } public byte[] getBytes() { return bytes; } public String toHexString() { return ByteUtil.toHexString(bytes); } @Override public boolean equals(Object other) { if (this == other) { return true; } if (other == null || this.getClass() != other.getClass()) { return false; } RskAddress otherSender = (RskAddress) other; return Arrays.equals(bytes, otherSender.bytes); } @Override public int hashCode() { return Arrays.hashCode(bytes); } /** * @return a DEBUG representation of the address, mainly used for logging. */ @Override public String toString() { return toHexString(); } public String toJsonString() { if (NULL_ADDRESS.equals(this)) { return null; } return TypeConverter.toUnformattedJsonHex(this.getBytes()); } }<|fim▁end|>
* @return the null address, which is the receiver of contract creation transactions. */
<|file_name|>RequiresEconomyModifier.java<|end_file_name|><|fim▁begin|>/* * This file is part of Nucleus, licensed under the MIT License (MIT). See the LICENSE.txt file * at the root of this project for more details. */ package io.github.nucleuspowered.nucleus.core.scaffold.command.modifier.impl; import io.github.nucleuspowered.nucleus.core.scaffold.command.ICommandContext; import io.github.nucleuspowered.nucleus.core.scaffold.command.annotation.CommandModifier; import io.github.nucleuspowered.nucleus.core.scaffold.command.control.CommandControl; import io.github.nucleuspowered.nucleus.core.scaffold.command.modifier.ICommandModifier; import io.github.nucleuspowered.nucleus.core.services.INucleusServiceCollection;<|fim▁hole|>import net.kyori.adventure.text.Component; import java.util.Optional; public class RequiresEconomyModifier implements ICommandModifier { @Override public Optional<Component> testRequirement(final ICommandContext source, final CommandControl control, final INucleusServiceCollection serviceCollection, final CommandModifier modifier) { if (!serviceCollection.economyServiceProvider().serviceExists()) { return Optional.of(serviceCollection.messageProvider().getMessageFor(source.cause().audience(), "command.economyrequired")); } return Optional.empty(); } }<|fim▁end|>
<|file_name|>32.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
export { NumberSmall_332 as default } from "../../";
<|file_name|>login.rs<|end_file_name|><|fim▁begin|>use std::io::prelude::*; use std::io; use cargo::ops; use cargo::core::{SourceId, Source}; use cargo::sources::RegistrySource; use cargo::util::{CliResult, CliError, Config}; #[derive(RustcDecodable)] struct Options { flag_host: Option<String>, arg_token: Option<String>, flag_verbose: bool, } pub const USAGE: &'static str = " Save an api token from the registry locally <|fim▁hole|>Usage: cargo login [options] [<token>] Options: -h, --help Print this message --host HOST Host to set the token for -v, --verbose Use verbose output "; pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> { config.shell().set_verbose(options.flag_verbose); let token = match options.arg_token.clone() { Some(token) => token, None => { let err = (|| { let src = try!(SourceId::for_central(config)); let mut src = RegistrySource::new(&src, config); try!(src.update()); let config = try!(src.config()); let host = options.flag_host.clone().unwrap_or(config.api); println!("please visit {}me and paste the API Token below", host); let mut line = String::new(); let input = io::stdin(); try!(input.lock().read_line(&mut line)); Ok(line) })(); try!(err.map_err(|e| CliError::from_boxed(e, 101))) } }; let token = token.trim().to_string(); try!(ops::registry_login(config, token).map_err(|e| { CliError::from_boxed(e, 101) })); Ok(None) }<|fim▁end|>
<|file_name|>match.js<|end_file_name|><|fim▁begin|>"use strict"; var valueToString = require("@sinonjs/commons").valueToString; var indexOf = require("@sinonjs/commons").prototypes.string.indexOf; var forEach = require("@sinonjs/commons").prototypes.array.forEach; var type = require("type-detect"); var engineCanCompareMaps = typeof Array.from === "function"; var deepEqual = require("./deep-equal").use(match); // eslint-disable-line no-use-before-define var isArrayType = require("./is-array-type"); var isSubset = require("./is-subset"); var createMatcher = require("./create-matcher"); /** * Returns true when `array` contains all of `subset` as defined by the `compare` * argument * * @param {Array} array An array to search for a subset * @param {Array} subset The subset to find in the array * @param {Function} compare A comparison function * @returns {boolean} [description] * @private */ function arrayContains(array, subset, compare) { if (subset.length === 0) { return true; } var i, l, j, k; for (i = 0, l = array.length; i < l; ++i) { if (compare(array[i], subset[0])) { for (j = 0, k = subset.length; j < k; ++j) { if (i + j >= l) { return false; } if (!compare(array[i + j], subset[j])) { return false; } } return true; } } return false; } /* eslint-disable complexity */ /** * Matches an object with a matcher (or value) * * @alias module:samsam.match * @param {object} object The object candidate to match * @param {object} matcherOrValue A matcher or value to match against * @returns {boolean} true when `object` matches `matcherOrValue` */ function match(object, matcherOrValue) { if (matcherOrValue && typeof matcherOrValue.test === "function") { return matcherOrValue.test(object); } switch (type(matcherOrValue)) { case "bigint": case "boolean": case "number": case "symbol": return matcherOrValue === object; case "function": return matcherOrValue(object) === true; case "string": var notNull = typeof object === "string" || Boolean(object); return ( notNull && indexOf( valueToString(object).toLowerCase(), matcherOrValue.toLowerCase() ) >= 0 ); case "null": return object === null; case "undefined": return typeof object === "undefined"; case "Date": /* istanbul ignore else */ if (type(object) === "Date") { return object.getTime() === matcherOrValue.getTime(); }<|fim▁hole|> break; case "Array": case "Int8Array": case "Uint8Array": case "Uint8ClampedArray": case "Int16Array": case "Uint16Array": case "Int32Array": case "Uint32Array": case "Float32Array": case "Float64Array": return ( isArrayType(matcherOrValue) && arrayContains(object, matcherOrValue, match) ); case "Map": /* istanbul ignore next: this is covered by a test, that is only run in IE, but we collect coverage information in node*/ if (!engineCanCompareMaps) { throw new Error( "The JavaScript engine does not support Array.from and cannot reliably do value comparison of Map instances" ); } return ( type(object) === "Map" && arrayContains( Array.from(object), Array.from(matcherOrValue), match ) ); default: break; } switch (type(object)) { case "null": return false; case "Set": return isSubset(matcherOrValue, object, match); default: break; } /* istanbul ignore else */ if (matcherOrValue && typeof matcherOrValue === "object") { if (matcherOrValue === object) { return true; } if (typeof object !== "object") { return false; } var prop; // eslint-disable-next-line guard-for-in for (prop in matcherOrValue) { var value = object[prop]; if ( typeof value === "undefined" && typeof object.getAttribute === "function" ) { value = 
object.getAttribute(prop); } if ( matcherOrValue[prop] === null || typeof matcherOrValue[prop] === "undefined" ) { if (value !== matcherOrValue[prop]) { return false; } } else if ( typeof value === "undefined" || !deepEqual(value, matcherOrValue[prop]) ) { return false; } } return true; } /* istanbul ignore next */ throw new Error("Matcher was an unknown or unsupported type"); } /* eslint-enable complexity */ forEach(Object.keys(createMatcher), function(key) { match[key] = createMatcher[key]; }); module.exports = match;<|fim▁end|>
/* istanbul ignore next: this is basically the rest of the function, which is covered */
<|file_name|>test_handle.py<|end_file_name|><|fim▁begin|># Copyright 2012 Kevin Goodsell # # This software is licensed under the Eclipse Public License (EPL) V1.0. <|fim▁hole|>import unittest import STAF class HandleTests(unittest.TestCase): def assertSTAFResultError(self, rc, func, *args, **kwargs): try: func(*args, **kwargs) self.fail('STAFResultError not raised') except STAF.STAFResultError, exc: self.assertEqual(exc.rc, rc) def testBasicHandle(self): with STAF.Handle('test handle') as h: result = h.submit('local', 'ping', 'ping') self.assertEqual(result, 'PONG') result = h.submit('local', 'ping', ['ping']) self.assertEqual(result, 'PONG') result = h.submit('local', 'service', 'list') services = dict((s['name'], s) for s in result) # There's not much reason to check all these, so just pick a few. self.assertEqual(services['DELAY'], {'name': 'DELAY', 'executable': None, 'library': '<Internal>'}) self.assertEqual(services['DIAG'], {'name': 'DIAG', 'executable': None, 'library': '<Internal>'}) self.assertEqual(services['ECHO'], {'name': 'ECHO', 'executable': None, 'library': '<Internal>'}) # Submit using a list result = h.submit('local', 'handle', ['list handles name', 'test handle', 'long']) self.assertTrue(isinstance(result, list)) self.assertEqual(len(result), 1) pieces = result[0] self.assertEqual(pieces['name'], 'test handle') self.assertEqual(pieces['state'], 'Registered') self.assertTrue(h.is_registered()) self.assertFalse(h.is_registered()) def testErrors(self): h = STAF.Handle('test handle') self.assertSTAFResultError(STAF.errors.UnknownService, h.submit, 'local', 'doesntexist', 'do magic') self.assertSTAFResultError(STAF.errors.InvalidRequestString, h.submit, 'local', 'ping', 'not a ping command') h.unregister() self.assertSTAFResultError(STAF.errors.HandleDoesNotExist, h.submit, 'local', 'ping', 'ping') # Unregistering a second time should not produce an error. h.unregister() def testStaticHandle(self): with STAF.Handle('helper') as helper: self.assertFalse(helper.is_static()) handle_num = helper.submit('local', 'handle', 'create handle name static-test') handle_num = int(handle_num) h = STAF.Handle(handle_num) self.assertTrue(h.is_static()) self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG') # Unregistering a static handle does nothing. 
h.unregister() self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG') # Delete the static handle helper.submit('local', 'handle', ['delete handle', str(h.handle_num())]) def testSyncModes(self): with STAF.Handle('test handle') as h: # FIRE AND FORGET req = h.submit('local', 'ping', 'ping', STAF.REQ_FIRE_AND_FORGET) self.assertTrue(req.isdigit()) time.sleep(2) # No queued result self.assertSTAFResultError(STAF.errors.NoQueueElement, h.submit, 'local', 'queue', 'get type STAF/RequestComplete') # No retained result self.assertSTAFResultError(STAF.errors.RequestNumberNotFound, h.submit, 'local', 'service', ['free request', req]) # QUEUE req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE) self.assertTrue(req.isdigit()) time.sleep(2) # Check queued result result = h.submit('local', 'queue', 'get type STAF/RequestComplete') msg = result['message'] self.assertEqual(msg['rc'], '0') self.assertEqual(msg['requestNumber'], req) self.assertEqual(msg['result'], 'PONG') # No retained result self.assertSTAFResultError(STAF.errors.RequestNumberNotFound, h.submit, 'local', 'service', ['free request', req]) # RETAIN req = h.submit('local', 'ping', 'ping', STAF.REQ_RETAIN) self.assertTrue(req.isdigit()) time.sleep(2) # No queued result self.assertSTAFResultError(STAF.errors.NoQueueElement, h.submit, 'local', 'queue', 'get type STAF/RequestComplete') # Check retained result result = h.submit('local', 'service', ['free request', req]) self.assertEqual(result['rc'], '0') self.assertEqual(result['result'], 'PONG') # QUEUE AND RETAIN req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE_RETAIN) self.assertTrue(req.isdigit()) time.sleep(2) # Check queued result result = h.submit('local', 'queue', 'get type STAF/RequestComplete') msg = result['message'] self.assertEqual(msg['rc'], '0') self.assertEqual(msg['requestNumber'], req) self.assertEqual(msg['result'], 'PONG') # Check retained result result = h.submit('local', 'service', ['free request', req]) self.assertEqual(result['rc'], '0') self.assertEqual(result['result'], 'PONG') if __name__ == '__main__': runner = unittest.TextTestRunner(verbosity=2) unittest.main(testRunner=runner)<|fim▁end|>
from __future__ import with_statement import time
<|file_name|>comex_page_profile_controllers.js<|end_file_name|><|fim▁begin|>/**we have all * @fileoverview * Profile 1/overview and 2/completing * @todo * - package.json * * @version 1 * @copyright ISCPIF-CNRS 2016 * @author [email protected] * * @requires comex_user_shared, comex_lib_elts * * NB The uinfo variable should be set to template's user.json_info value. */ // 3 exposed vars for inline js controls var teamCityDiv = document.getElementById('lab_locname_div') var otherInstDiv = document.getElementById('other_inst_div') var otherInstTypeInput = document.getElementById('other_inst_type') // reselecting current_user's info choices function setupSavedItems(uinfo) { // (date and menu values are set up here // but normal text vals are set up via html template, // pic and middle_name are set below from a separate function, // and multi text inputs are set up via form init... fixable to harmonize) for (var i in cmxClt.COLS) { var colType = cmxClt.COLS[i][3] if (colType == 'd' || colType == 'm') { var colName = cmxClt.COLS[i][0] var chosenV = uinfo[colName] // special case if (colName == 'inst_type' && uinfo.insts && uinfo.insts.length) { chosenV = uinfo.insts[0].inst_type } // console.log('setupSavedItems', colName, '('+colType+')' , 'with', chosenV) // if the value is none => there's nothing to do if (chosenV != undefined && chosenV != null) { var tgtElt = document.getElementById(colName) if (tgtElt != null) { // d <=> convert to YY/MM/DD from iso string YYYY-MM-DD if (colType == 'd') { // console.log('setting date', colName, 'with', chosenV) tgtElt.value = chosenV.replace(/-/g,'/') tgtElt.dispatchEvent(new CustomEvent('change')) } // m <=> select saved menus if (colType == 'm') { // console.log('setting menu', colName, 'with', chosenV) var myOption = tgtElt.querySelector(`option[value="${chosenV}"]`) // normal case if (myOption) { tgtElt.selectedIndex = myOption.index tgtElt.dispatchEvent(new CustomEvent('change')) } // this case is really just for inst_type right now else if (tgtElt.querySelector(`option[value="other"]`)) { console.log('setting menu option other for', colName, 'with', chosenV) tgtElt.selectedIndex = tgtElt.querySelector(`option[value="other"]`).index tgtElt.dispatchEvent(new CustomEvent('change')) var relatedFreeTxt = document.getElementById('other_'+colName) if (relatedFreeTxt) { relatedFreeTxt.value = chosenV relatedFreeTxt.dispatchEvent(new CustomEvent('change')) } } // fallback case else { var optionOthers = console.warn(`setupSavedItems: couldn't find option: ${chosenV} for select element: ${colName}`) } } } else { console.warn("setupSavedItems: couldn't find element: "+colName) } } } } } // also pre-setup for images var picShow = document.getElementById('show_pic') if (uinfo.pic_src) { cmxClt.uform.showPic(uinfo.pic_src) } // initialize form controllers var theUForm = cmxClt.uform.Form( // id "comex_profile_form", // onkeyup function completionAsYouGo, // other params { 'multiTextinputs': [{'id':'keywords', 'prevals': uinfo.keywords, 'minEntries': 3 }, {'id':'hashtags', 'prevals': uinfo.hashtags, 'color': "#23A", 'minEntries': 3 }] } ) var deleteUser = document.getElementById('delete_user') deleteUser.checked = false setupSavedItems(uinfo) // monitor inst label (if any), so if label changes => reset inst_type (if any) var instLabelInput = document.getElementById('inst_label') var instTypeInput = document.getElementById('inst_type') var instLabelPreviousVal = instLabelInput.value instLabelInput.onblur = function () { if (instTypeInput.value) { if 
(instLabelPreviousVal != "" && ( !instLabelInput.value || (instLabelInput.value != instLabelPreviousVal))) { // we reset all inst_type block instTypeInput.value = '' otherInstDiv.style.display='none'; otherInstTypeInput.value=''; } } instLabelPreviousVal = instLabelInput.value // NB don't use uinfo because user may have already changed the form } instTypeInput.onblur = function() { instLabelPreviousVal = instLabelInput.value } // open middlename if there is one if (uinfo.middle_name != null && uinfo.middle_name != "" && uinfo.middle_name != "None") { console.log("showing midname for profile") cmxClt.uform.displayMidName() } // main validation function // ------------------------ function completionAsYouGo() { theUForm.elMainMessage.style.display = 'block' theUForm.elMainMessage.innerHTML = "Checking the answers..." var diagnosticParams = {'fixResidue': true, 'ignore': ['email']} cmxClt.uform.simpleValidateAndMessage(theUForm, diagnosticParams) // timestamp is done server-side } // run first check on existing profile data pre-filled by the template completionAsYouGo() // set up a "Your data was saved" modal box (tied to the SUBMIT button) function addAndShowModal(someHtmlContent) { // create and add modal cmxClt.elts.box.addGenericBox( 'save_info', 'Profile update', someHtmlContent, function(){window.location.reload()} ) // show modal var saveInfoModal = document.getElementById('save_info') saveInfoModal.style.display = 'block' saveInfoModal.style.opacity = 1 } function submitAndModal() { var formdat = theUForm.asFormData(); var postUrl = "/services/user/profile/" if (window.fetch) { fetch(postUrl, { method: 'POST', headers: {'X-Requested-With': 'MyFetchRequest'}, body: formdat, credentials: "same-origin" // <= allows our req to have id cookie }) .then(function(response) { if(response.ok) { response.text().then( function(bodyText) { // console.log("Profile POST was OK, showing answer") addAndShowModal(bodyText) }) } else { response.text().then( function(bodyText) { console.log("Profile POST failed, aborting and showing message") addAndShowModal("<h4>Profile POST server error:</h4>"+bodyText) }) } }) .catch(function(error) { console.warn('fetch error:'+error.message); });<|fim▁hole|> // also possible using old-style jquery ajax else { $.ajax({ contentType: false, // <=> multipart processData: false, // <=> multipart data: formdat, type: 'POST', url: postUrl, success: function(data) { addAndShowModal(data) }, error: function(result) { console.warn('jquery ajax error with result', result) } }); } } console.log("profile controllers load OK")<|fim▁end|>
}