// WorldSession.cpp
/*
* Copyright (C) 2005-2010 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/** \file
\ingroup u2w
*/
#include "WorldSocket.h" // must be first to make ACE happy with ACE includes in it
#include "Common.h"
#include "Database/DatabaseEnv.h"
#include "Log.h"
#include "Opcodes.h"
#include "WorldPacket.h"
#include "WorldSession.h"
#include "Player.h"
#include "ObjectMgr.h"
#include "Group.h"
#include "Guild.h"
#include "World.h"
#include "BattleGroundMgr.h"
#include "MapManager.h"
#include "SocialMgr.h"
#include "Auth/AuthCrypt.h"
#include "Auth/HMACSHA1.h"
#include "zlib/zlib.h"
// select opcodes appropriate for processing in Map::Update context for current session state
static bool MapSessionFilterHelper(WorldSession* session, OpcodeHandler const& opHandle)
{
// we do not process thread-unsafe packets
if (opHandle.packetProcessing == PROCESS_THREADUNSAFE)
return false;
// we do not process packets of players who are not logged in
Player * plr = session->GetPlayer();
if (!plr)
return false;
// in Map::Update() we do not process packets where player is not in world!
return plr->IsInWorld();
}
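// returns true when a packet may be handled from within Map::Update() for the current session state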
bool MapSessionFilter::Process(WorldPacket * packet)
{
OpcodeHandler const& opHandle = opcodeTable[packet->GetOpcode()];
if (opHandle.packetProcessing == PROCESS_INPLACE)
return true;
// let's check if our opcode can be really processed in Map::Update()
return MapSessionFilterHelper(m_pSession, opHandle);
}
// we should process ALL packets when player is not in world/logged in
// OR packet handler is not thread-safe!
bool WorldSessionFilter::Process(WorldPacket* packet)
{
OpcodeHandler const& opHandle = opcodeTable[packet->GetOpcode()];
// check if packet handler is supposed to be safe
if (opHandle.packetProcessing == PROCESS_INPLACE)
return true;
// let's check if our opcode can't be processed in Map::Update()
return !MapSessionFilterHelper(m_pSession, opHandle);
}
/// WorldSession constructor
WorldSession::WorldSession(uint32 id, WorldSocket *sock, AccountTypes sec, uint8 expansion, time_t mute_time, LocaleConstant locale) :
LookingForGroup_auto_join(false), LookingForGroup_auto_add(false), m_muteTime(mute_time),
_player(NULL), m_Socket(sock),_security(sec), _accountId(id), m_expansion(expansion), _logoutTime(0),
m_inQueue(false), m_playerLoading(false), m_playerLogout(false), m_playerRecentlyLogout(false), m_playerSave(false),
m_sessionDbcLocale(sWorld.GetAvailableDbcLocale(locale)), m_sessionDbLocaleIndex(sObjectMgr.GetIndexForLocale(locale)),
m_latency(0), m_tutorialState(TUTORIALDATA_UNCHANGED)
{
if (sock)
{
m_Address = sock->GetRemoteAddress ();
sock->AddReference ();
}
}
/// WorldSession destructor
WorldSession::~WorldSession()
{
///- unload player if not unloaded
if (_player)
LogoutPlayer (true);
///- If there is an unclosed socket, close it
if (m_Socket)
{
m_Socket->CloseSocket ();
m_Socket->RemoveReference ();
m_Socket = NULL;
}
///- empty incoming packet queue
WorldPacket* packet;
while(_recvQueue.next(packet))
delete packet;
}
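/// Log an error for a client packet whose size does not match the expected size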
void WorldSession::SizeError(WorldPacket const& packet, uint32 size) const
{
sLog.outError("Client (account %u) send packet %s (%u) with size " SIZEFMTD " but expected %u (attempt crash server?), skipped",
GetAccountId(),LookupOpcodeName(packet.GetOpcode()),packet.GetOpcode(),packet.size(),size);
}
/// Get the player name
char const* WorldSession::GetPlayerName() const
{
return GetPlayer() ? GetPlayer()->GetName() : "<none>";
}
/// Send a packet to the client
void WorldSession::SendPacket(WorldPacket const* packet)
{
if (!m_Socket)
return;
#ifdef MANGOS_DEBUG
// Code for network use statistic
static uint64 sendPacketCount = 0;
static uint64 sendPacketBytes = 0;
static time_t firstTime = time(NULL);
static time_t lastTime = firstTime; // next 60 secs start time
static uint64 sendLastPacketCount = 0;
static uint64 sendLastPacketBytes = 0;
time_t cur_time = time(NULL);
if((cur_time - lastTime) < 60)
{
sendPacketCount+=1;
sendPacketBytes+=packet->size();
sendLastPacketCount+=1;
sendLastPacketBytes+=packet->size();
}
else
{
uint64 minTime = uint64(cur_time - lastTime);
uint64 fullTime = uint64(lastTime - firstTime);
DETAIL_LOG("Send all time packets count: " UI64FMTD " bytes: " UI64FMTD " avr.count/sec: %f avr.bytes/sec: %f time: %u",sendPacketCount,sendPacketBytes,float(sendPacketCount)/fullTime,float(sendPacketBytes)/fullTime,uint32(fullTime));
DETAIL_LOG("Send last min packets count: " UI64FMTD " bytes: " UI64FMTD " avr.count/sec: %f avr.bytes/sec: %f",sendLastPacketCount,sendLastPacketBytes,float(sendLastPacketCount)/minTime,float(sendLastPacketBytes)/minTime);
lastTime = cur_time;
sendLastPacketCount = 1;
sendLastPacketBytes = packet->wpos(); // wpos is real written size
}
#endif // !MANGOS_DEBUG
if (m_Socket->SendPacket (*packet) == -1)
m_Socket->CloseSocket ();
}
/// Add an incoming packet to the queue
void WorldSession::QueuePacket(WorldPacket* new_packet)
{
_recvQueue.add(new_packet);
}
/// Logging helper for unexpected opcodes
void WorldSession::LogUnexpectedOpcode(WorldPacket* packet, const char *reason)
{
sLog.outError( "SESSION: received unexpected opcode %s (0x%.4X) %s",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode(),
reason);
}
/// Logging helper for unexpected opcodes
void WorldSession::LogUnprocessedTail(WorldPacket *packet)
{
sLog.outError( "SESSION: opcode %s (0x%.4X) have unprocessed tail data (read stop at " SIZEFMTD " from " SIZEFMTD ")",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode(),
packet->rpos(),packet->wpos());
}
/// Update the WorldSession (triggered by World update)
bool WorldSession::Update(uint32 diff, PacketFilter& updater)
{
///- Retrieve packets from the receive queue and call the appropriate handlers
/// do not process packets if the socket is already closed
WorldPacket* packet;
while (m_Socket && !m_Socket->IsClosed() && _recvQueue.next(packet, updater))
{
/*#if 1
sLog.outError( "MOEP: %s (0x%.4X)",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode());
#endif*/
OpcodeHandler const& opHandle = opcodeTable[packet->GetOpcode()];
try
{
switch (opHandle.status)
{
case STATUS_LOGGEDIN:
if(!_player)
{
// skip the "unexpected opcode" error for STATUS_LOGGEDIN if the player logged out a moment ago - these can be packets delayed by network lag
if(!m_playerRecentlyLogout)
LogUnexpectedOpcode(packet, "the player has not logged in yet");
}
else if(_player->IsInWorld())
ExecuteOpcode(opHandle, packet);
// lag can cause STATUS_LOGGEDIN opcodes to arrive after the player started a transfer
break;
case STATUS_LOGGEDIN_OR_RECENTLY_LOGGEDOUT:
if(!_player && !m_playerRecentlyLogout)
{
LogUnexpectedOpcode(packet, "the player has not logged in yet and not recently logout");
}
else
// _player may not be set here; in that case it must be checked in the packet handler
ExecuteOpcode(opHandle, packet);
break;
case STATUS_TRANSFER:
if(!_player)
LogUnexpectedOpcode(packet, "the player has not logged in yet");
else if(_player->IsInWorld())
LogUnexpectedOpcode(packet, "the player is still in world");
else
ExecuteOpcode(opHandle, packet);
break;
case STATUS_AUTHED:
// prevent cheating with skip queue wait
if(m_inQueue)
{
LogUnexpectedOpcode(packet, "the player not pass queue yet");
break;
}
// a single opcode from the authed state can be sent shortly after logout,
// before other STATUS_LOGGEDIN_OR_RECENTLY_LOGGEDOUT opcodes.
if (packet->GetOpcode() != CMSG_SET_ACTIVE_VOICE_CHANNEL)
m_playerRecentlyLogout = false;
ExecuteOpcode(opHandle, packet);
break;
case STATUS_NEVER:
sLog.outError( "SESSION: received not allowed opcode %s (0x%.4X)",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode());
break;
case STATUS_UNHANDLED:
DEBUG_LOG("SESSION: received not handled opcode %s (0x%.4X)",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode());
break;
default:
sLog.outError("SESSION: received wrong-status-req opcode %s (0x%.4X)",
LookupOpcodeName(packet->GetOpcode()),
packet->GetOpcode());
break;
}
}
catch (ByteBufferException &)
{
sLog.outError("WorldSession::Update ByteBufferException occured while parsing a packet (opcode: %u) from client %s, accountid=%i.",
packet->GetOpcode(), GetRemoteAddress().c_str(), GetAccountId());
if (sLog.HasLogLevelOrHigher(LOG_LVL_DEBUG))
{
sLog.outDebug("Dumping error causing packet:");
packet->hexlike();
}
if (sWorld.getConfig(CONFIG_BOOL_KICK_PLAYER_ON_BAD_PACKET))
{
DETAIL_LOG("Disconnecting session [account id %u / address %s] for badly formatted packet.",
GetAccountId(), GetRemoteAddress().c_str());
KickPlayer();
}
}
delete packet;
}
///- Cleanup socket pointer if need
if (m_Socket && m_Socket->IsClosed ())
{
m_Socket->RemoveReference ();
m_Socket = NULL;
}
//check if we are safe to proceed with logout
//logout procedure should happen only in World::UpdateSessions() method!!!
if(updater.ProcessLogout())
{
///- If necessary, log the player out
time_t currTime = time(NULL);
if (!m_Socket || (ShouldLogOut(currTime) && !m_playerLoading))
LogoutPlayer(true);
if (!m_Socket)
return false; //Will remove this session from the world session map
}
return true;
}
/// %Log the player out
void WorldSession::LogoutPlayer(bool Save)
{
// finish pending transfers before starting the logout
while(_player && _player->IsBeingTeleportedFar())
HandleMoveWorldportAckOpcode();
m_playerLogout = true;
m_playerSave = Save;
if (_player)
{
sLog.outChar("Account: %d (IP: %s) Logout Character:[%s] (guid: %u)", GetAccountId(), GetRemoteAddress().c_str(), _player->GetName() ,_player->GetGUIDLow());
if (uint64 lguid = GetPlayer()->GetLootGUID())
DoLootRelease(lguid);
///- If the player just died before logging out, make him appear as a ghost
//FIXME: logout must be delayed if the connection to the client was lost during combat
if (_player->GetDeathTimer())
{
_player->getHostileRefManager().deleteReferences();
_player->BuildPlayerRepop();
_player->RepopAtGraveyard();
}
else if (!_player->getAttackers().empty())
{
_player->CombatStop();
_player->getHostileRefManager().setOnlineOfflineState(false);
_player->RemoveAllAurasOnDeath();
// build the set of players who are attacking _player, or whose pets are attacking _player
std::set<Player*> aset;
for(Unit::AttackerSet::const_iterator itr = _player->getAttackers().begin(); itr != _player->getAttackers().end(); ++itr)
{
Unit* owner = (*itr)->GetOwner(); // including player controlled case
if(owner)
{
if(owner->GetTypeId()==TYPEID_PLAYER)
aset.insert((Player*)owner);
}
else
if((*itr)->GetTypeId()==TYPEID_PLAYER)
aset.insert((Player*)(*itr));
}
_player->SetPvPDeath(!aset.empty());
_player->KillPlayer();
_player->BuildPlayerRepop();
_player->RepopAtGraveyard();
// give honor to all attackers from set like group case
for(std::set<Player*>::const_iterator itr = aset.begin(); itr != aset.end(); ++itr)
(*itr)->RewardHonor(_player,aset.size());
// give bg rewards and update counters as if killed by the first of the attackers;
// this can't be called for every attacker.
if(!aset.empty())
if(BattleGround *bg = _player->GetBattleGround())
bg->HandleKillPlayer(_player,*aset.begin());
}
else if(_player->HasAuraType(SPELL_AURA_SPIRIT_OF_REDEMPTION))
{
// this will kill character by SPELL_AURA_SPIRIT_OF_REDEMPTION
_player->RemoveSpellsCausingAura(SPELL_AURA_MOD_SHAPESHIFT);
//_player->SetDeathPvP(*); set at SPELL_AURA_SPIRIT_OF_REDEMPTION apply time
_player->KillPlayer();
_player->BuildPlayerRepop();
_player->RepopAtGraveyard();
}
//drop a flag if player is carrying it
if(BattleGround *bg = _player->GetBattleGround())
bg->EventPlayerLoggedOut(_player);
///- Teleport to home if the player is in an invalid instance
if(!_player->m_InstanceValid && !_player->isGameMaster())
{
_player->TeleportToHomebind();
//this is a bad place to call for far teleport because we need player to be in world for successful logout
//maybe we should implement delayed far teleport logout?
}
// FG: finish pending transfers after starting the logout
// this should fix players being able to log out and log back in with full hp at their death position
while(_player->IsBeingTeleportedFar())
HandleMoveWorldportAckOpcode();
for (int i=0; i < PLAYER_MAX_BATTLEGROUND_QUEUES; ++i)
{
if(BattleGroundQueueTypeId bgQueueTypeId = _player->GetBattleGroundQueueTypeId(i))
{
_player->RemoveBattleGroundQueueId(bgQueueTypeId);
sBattleGroundMgr.m_BattleGroundQueues[ bgQueueTypeId ].RemovePlayer(_player->GetObjectGuid(), true);
}
}
///- Reset the online field in the account table
// no point resetting online in character table here as Player::SaveToDB() will set it to 1 since player has not been removed from world at this stage
// No SQL injection as AccountID is uint32
LoginDatabase.PExecute("UPDATE account SET active_realm_id = 0 WHERE id = '%u'", GetAccountId());
///- If the player is in a guild, update the guild roster and broadcast a logout message to other guild members
if (Guild *guild = sObjectMgr.GetGuildById(_player->GetGuildId()))
{
if (MemberSlot* slot = guild->GetMemberSlot(_player->GetObjectGuid()))
{
slot->SetMemberStats(_player);
slot->UpdateLogoutTime();
}
guild->BroadcastEvent(GE_SIGNED_OFF, _player->GetGUID(), _player->GetName());
}
///- Remove pet
_player->RemovePet(PET_SAVE_AS_CURRENT);
///- empty buyback items and save the player in the database
// some save parts only correctly work in case player present in map/player_lists (pets, etc)
if(Save)
{
uint32 eslot;
for(int j = BUYBACK_SLOT_START; j < BUYBACK_SLOT_END; ++j)
{
eslot = j - BUYBACK_SLOT_START;
_player->SetUInt64Value(PLAYER_FIELD_VENDORBUYBACK_SLOT_1 + (eslot * 2), 0);
_player->SetUInt32Value(PLAYER_FIELD_BUYBACK_PRICE_1 + eslot, 0);
_player->SetUInt32Value(PLAYER_FIELD_BUYBACK_TIMESTAMP_1 + eslot, 0);
}
_player->SaveToDB();
}
///- Leave all channels before player delete...
_player->CleanupChannels();
///- If the player is in a group (or invited), remove him. If the group then has only 1 member left, disband it.
_player->UninviteFromGroup();
// remove player from the group if he is:
// a) in group; b) not in raid group; c) logging out normally (not being kicked or disconnected)
if(_player->GetGroup() && !_player->GetGroup()->isRaidGroup() && m_Socket)
_player->RemoveFromGroup();
///- Send update to group
if(_player->GetGroup())
_player->GetGroup()->SendUpdate();
///- Broadcast a logout message to the player's friends
sSocialMgr.SendFriendStatus(_player, FRIEND_OFFLINE, _player->GetObjectGuid(), true);
sSocialMgr.RemovePlayerSocial (_player->GetGUIDLow ());
///- Remove the player from the world
// the player may not be in the world when logging out
// e.g if he got disconnected during a transfer to another map
// calls to GetMap in this case may cause crashes
Map* _map = _player->GetMap();
_map->Remove(_player, true);
SetPlayer(NULL); // deleted in Remove call
///- Send the 'logout complete' packet to the client
WorldPacket data( SMSG_LOGOUT_COMPLETE, 0 );
SendPacket( &data );
///- Since each account can only have one online character at any given time, ensure all characters for active account are marked as offline
//No SQL injection as AccountId is uint32
CharacterDatabase.PExecute("UPDATE characters SET online = 0 WHERE account = '%u'",
GetAccountId());
DEBUG_LOG( "SESSION: Sent SMSG_LOGOUT_COMPLETE Message" );
}
m_playerLogout = false;
m_playerSave = false;
m_playerRecentlyLogout = true;
LogoutRequest(0);
}
/// Kick a player out of the World
void WorldSession::KickPlayer()
{
if (m_Socket)
m_Socket->CloseSocket ();
}
/// Send an area trigger message to the client
void WorldSession::SendAreaTriggerMessage(const char* Text, ...)
{
va_list ap;
char szStr [1024];
szStr[0] = '\0';
va_start(ap, Text);
vsnprintf( szStr, 1024, Text, ap );
va_end(ap);
uint32 length = strlen(szStr)+1;
WorldPacket data(SMSG_AREA_TRIGGER_MESSAGE, 4+length);
data << length;
data << szStr;
SendPacket(&data);
}
void WorldSession::SendNotification(const char *format,...)
{
if(format)
{
va_list ap;
char szStr [1024];
szStr[0] = '\0';
va_start(ap, format);
vsnprintf( szStr, 1024, format, ap );
va_end(ap);
WorldPacket data(SMSG_NOTIFICATION, (strlen(szStr)+1));
data << szStr;
SendPacket(&data);
}
}
void WorldSession::SendNotification(int32 string_id,...)
{
char const* format = GetMangosString(string_id);
if(format)
{
va_list ap;
char szStr [1024];
szStr[0] = '\0';
va_start(ap, string_id);
vsnprintf( szStr, 1024, format, ap );
va_end(ap);
WorldPacket data(SMSG_NOTIFICATION, (strlen(szStr)+1));
data << szStr;
SendPacket(&data);
}
}
void WorldSession::SendSetPhaseShift(uint32 PhaseShift)
{
WorldPacket data(SMSG_SET_PHASE_SHIFT, 4);
data << uint32(PhaseShift);
SendPacket(&data);
}
const char * WorldSession::GetMangosString( int32 entry ) const
{
return sObjectMgr.GetMangosString(entry,GetSessionDbLocaleIndex());
}
void WorldSession::Handle_NULL( WorldPacket& recvPacket )
{
DEBUG_LOG("SESSION: received unimplemented opcode %s (0x%.4X)",
LookupOpcodeName(recvPacket.GetOpcode()),
recvPacket.GetOpcode());
}
void WorldSession::Handle_EarlyProccess( WorldPacket& recvPacket )
{
sLog.outError( "SESSION: received opcode %s (0x%.4X) that must be processed in WorldSocket::OnRead",
LookupOpcodeName(recvPacket.GetOpcode()),
recvPacket.GetOpcode());
}
void WorldSession::Handle_ServerSide( WorldPacket& recvPacket )
{
sLog.outError("SESSION: received server-side opcode %s (0x%.4X)",
LookupOpcodeName(recvPacket.GetOpcode()),
recvPacket.GetOpcode());
}
void WorldSession::Handle_Deprecated( WorldPacket& recvPacket )
{
sLog.outError( "SESSION: received deprecated opcode %s (0x%.4X)",
LookupOpcodeName(recvPacket.GetOpcode()),
recvPacket.GetOpcode());
}
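/// Send the auth response; while the player waits in queue, include the current queue position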
void WorldSession::SendAuthWaitQue(uint32 position)
{
if(position == 0)
{
WorldPacket packet( SMSG_AUTH_RESPONSE, 1 );
packet << uint8( AUTH_OK );
SendPacket(&packet);
}
else
{
WorldPacket packet( SMSG_AUTH_RESPONSE, 1+4+1 );
packet << uint8(AUTH_WAIT_QUEUE);
packet << uint32(position);
packet << uint8(0); // unk 3.3.0
SendPacket(&packet);
}
}
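/// Load the account-wide (global) account data from the database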
void WorldSession::LoadGlobalAccountData()
{
LoadAccountData(
CharacterDatabase.PQuery("SELECT type, time, data FROM account_data WHERE account='%u'", GetAccountId()),
GLOBAL_CACHE_MASK
);
}
void WorldSession::LoadAccountData(QueryResult* result, uint32 mask)
{
for (uint32 i = 0; i < NUM_ACCOUNT_DATA_TYPES; ++i)
if (mask & (1 << i))
m_accountData[i] = AccountData();
if(!result)
return;
do
{
Field *fields = result->Fetch();
uint32 type = fields[0].GetUInt32();
if (type >= NUM_ACCOUNT_DATA_TYPES)
{
sLog.outError("Table `%s` have invalid account data type (%u), ignore.",
mask == GLOBAL_CACHE_MASK ? "account_data" : "character_account_data", type);
continue;
}
if ((mask & (1 << type))==0)
{
sLog.outError("Table `%s` have non appropriate for table account data type (%u), ignore.",
mask == GLOBAL_CACHE_MASK ? "account_data" : "character_account_data", type);
continue;
}
m_accountData[type].Time = time_t(fields[1].GetUInt64());
m_accountData[type].Data = fields[2].GetCppString();
} while (result->NextRow());
delete result;
}
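/// Save one account data entry, account-wide or per-character depending on the type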
void WorldSession::SetAccountData(AccountDataType type, time_t time_, std::string data)
{
if ((1 << type) & GLOBAL_CACHE_MASK)
{
uint32 acc = GetAccountId();
CharacterDatabase.BeginTransaction ();
CharacterDatabase.PExecute("DELETE FROM account_data WHERE account='%u' AND type='%u'", acc, type);
std::string safe_data = data;
CharacterDatabase.escape_string(safe_data);
CharacterDatabase.PExecute("INSERT INTO account_data VALUES ('%u','%u','" UI64FMTD "','%s')", acc, type, uint64(time_), safe_data.c_str());
CharacterDatabase.CommitTransaction ();
}
else
{
// _player can be NULL and the packet can arrive after logout, but m_GUIDLow still stores the correct guid
if(!m_GUIDLow)
return;
CharacterDatabase.BeginTransaction ();
CharacterDatabase.PExecute("DELETE FROM character_account_data WHERE guid='%u' AND type='%u'", m_GUIDLow, type);
std::string safe_data = data;
CharacterDatabase.escape_string(safe_data);
CharacterDatabase.PExecute("INSERT INTO character_account_data VALUES ('%u','%u','" UI64FMTD "','%s')", m_GUIDLow, type, uint64(time_), safe_data.c_str());
CharacterDatabase.CommitTransaction ();
}
m_accountData[type].Time = time_;
m_accountData[type].Data = data;
}
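/// Send the client the last-modified times of the cached account data types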
void WorldSession::SendAccountDataTimes(uint32 mask)
{
WorldPacket data( SMSG_ACCOUNT_DATA_TIMES, 4+1+4+8*4 ); // changed in WotLK
data << uint32(time(NULL)); // unix time of something
data << uint8(1);
data << uint32(mask); // type mask
for(uint32 i = 0; i < NUM_ACCOUNT_DATA_TYPES; ++i)
if(mask & (1 << i))
data << uint32(GetAccountData(AccountDataType(i))->Time);// also unix time
SendPacket(&data);
}
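/// Load the tutorial flags for this account from the database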
void WorldSession::LoadTutorialsData()
{
for ( int aX = 0 ; aX < 8 ; ++aX )
m_Tutorials[ aX ] = 0;
QueryResult *result = CharacterDatabase.PQuery("SELECT tut0,tut1,tut2,tut3,tut4,tut5,tut6,tut7 FROM character_tutorial WHERE account = '%u'", GetAccountId());
if(!result)
{
m_tutorialState = TUTORIALDATA_NEW;
return;
}
do
{
Field *fields = result->Fetch();
for (int iI = 0; iI < 8; ++iI)
m_Tutorials[iI] = fields[iI].GetUInt32();
}
while( result->NextRow() );
delete result;
m_tutorialState = TUTORIALDATA_UNCHANGED;
}
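/// Send the tutorial flags to the client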
void WorldSession::SendTutorialsData()
{
WorldPacket data(SMSG_TUTORIAL_FLAGS, 4*8);
for(uint32 i = 0; i < 8; ++i)
data << m_Tutorials[i];
SendPacket(&data);
}
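/// Persist the tutorial flags, inserting or updating depending on their current state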
void WorldSession::SaveTutorialsData()
{
switch(m_tutorialState)
{
case TUTORIALDATA_CHANGED:
CharacterDatabase.PExecute("UPDATE character_tutorial SET tut0='%u', tut1='%u', tut2='%u', tut3='%u', tut4='%u', tut5='%u', tut6='%u', tut7='%u' WHERE account = '%u'",
m_Tutorials[0], m_Tutorials[1], m_Tutorials[2], m_Tutorials[3], m_Tutorials[4], m_Tutorials[5], m_Tutorials[6], m_Tutorials[7], GetAccountId());
break;
case TUTORIALDATA_NEW:
CharacterDatabase.PExecute("INSERT INTO character_tutorial (account,tut0,tut1,tut2,tut3,tut4,tut5,tut6,tut7) VALUES ('%u', '%u', '%u', '%u', '%u', '%u', '%u', '%u', '%u')",
GetAccountId(), m_Tutorials[0], m_Tutorials[1], m_Tutorials[2], m_Tutorials[3], m_Tutorials[4], m_Tutorials[5], m_Tutorials[6], m_Tutorials[7]);
break;
case TUTORIALDATA_UNCHANGED:
break;
}
m_tutorialState = TUTORIALDATA_UNCHANGED;
}
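/// Parse the zlib-compressed addon list sent by the client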
void WorldSession::ReadAddonsInfo(WorldPacket &data)
{
if (data.rpos() + 4 > data.size())
return;
uint32 size;
data >> size;
if(!size)
return;
if(size > 0xFFFFF)
{
sLog.outError("WorldSession::ReadAddonsInfo addon info too big, size %u", size);
return;
}
uLongf uSize = size;
uint32 pos = data.rpos();
ByteBuffer addonInfo;
addonInfo.resize(size);
if (uncompress(const_cast<uint8*>(addonInfo.contents()), &uSize, const_cast<uint8*>(data.contents() + pos), data.size() - pos) == Z_OK)
{
uint32 addonsCount;
addonInfo >> addonsCount; // addons count
for(uint32 i = 0; i < addonsCount; ++i)
{
std::string addonName;
uint8 enabled;
uint32 crc, unk1;
// check next addon data format correctness
if(addonInfo.rpos()+1 > addonInfo.size())
return;
addonInfo >> addonName;
addonInfo >> enabled >> crc >> unk1;
DEBUG_LOG("ADDON: Name: %s, Enabled: 0x%x, CRC: 0x%x, Unknown2: 0x%x", addonName.c_str(), enabled, crc, unk1);
m_addonsList.push_back(AddonInfo(addonName, enabled, crc));
}
uint32 unk2;
addonInfo >> unk2;
if(addonInfo.rpos() != addonInfo.size())
DEBUG_LOG("packet under read!");
}
else
sLog.outError("Addon packet uncompress error!");
}
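/// Reply with addon info, appending the public key for addons whose CRC does not match the standard one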
void WorldSession::SendAddonsInfo()
{
unsigned char tdata[256] =
{
0xC3, 0x5B, 0x50, 0x84, 0xB9, 0x3E, 0x32, 0x42, 0x8C, 0xD0, 0xC7, 0x48, 0xFA, 0x0E, 0x5D, 0x54,
0x5A, 0xA3, 0x0E, 0x14, 0xBA, 0x9E, 0x0D, 0xB9, 0x5D, 0x8B, 0xEE, 0xB6, 0x84, 0x93, 0x45, 0x75,
0xFF, 0x31, 0xFE, 0x2F, 0x64, 0x3F, 0x3D, 0x6D, 0x07, 0xD9, 0x44, 0x9B, 0x40, 0x85, 0x59, 0x34,
0x4E, 0x10, 0xE1, 0xE7, 0x43, 0x69, 0xEF, 0x7C, 0x16, 0xFC, 0xB4, 0xED, 0x1B, 0x95, 0x28, 0xA8,
0x23, 0x76, 0x51, 0x31, 0x57, 0x30, 0x2B, 0x79, 0x08, 0x50, 0x10, 0x1C, 0x4A, 0x1A, 0x2C, 0xC8,
0x8B, 0x8F, 0x05, 0x2D, 0x22, 0x3D, 0xDB, 0x5A, 0x24, 0x7A, 0x0F, 0x13, 0x50, 0x37, 0x8F, 0x5A,
0xCC, 0x9E, 0x04, 0x44, 0x0E, 0x87, 0x01, 0xD4, 0xA3, 0x15, 0x94, 0x16, 0x34, 0xC6, 0xC2, 0xC3,
0xFB, 0x49, 0xFE, 0xE1, 0xF9, 0xDA, 0x8C, 0x50, 0x3C, 0xBE, 0x2C, 0xBB, 0x57, 0xED, 0x46, 0xB9,
0xAD, 0x8B, 0xC6, 0xDF, 0x0E, 0xD6, 0x0F, 0xBE, 0x80, 0xB3, 0x8B, 0x1E, 0x77, 0xCF, 0xAD, 0x22,
0xCF, 0xB7, 0x4B, 0xCF, 0xFB, 0xF0, 0x6B, 0x11, 0x45, 0x2D, 0x7A, 0x81, 0x18, 0xF2, 0x92, 0x7E,
0x98, 0x56, 0x5D, 0x5E, 0x69, 0x72, 0x0A, 0x0D, 0x03, 0x0A, 0x85, 0xA2, 0x85, 0x9C, 0xCB, 0xFB,
0x56, 0x6E, 0x8F, 0x44, 0xBB, 0x8F, 0x02, 0x22, 0x68, 0x63, 0x97, 0xBC, 0x85, 0xBA, 0xA8, 0xF7,
0xB5, 0x40, 0x68, 0x3C, 0x77, 0x86, 0x6F, 0x4B, 0xD7, 0x88, 0xCA, 0x8A, 0xD7, 0xCE, 0x36, 0xF0,
0x45, 0x6E, 0xD5, 0x64, 0x79, 0x0F, 0x17, 0xFC, 0x64, 0xDD, 0x10, 0x6F, 0xF3, 0xF5, 0xE0, 0xA6,
0xC3, 0xFB, 0x1B, 0x8C, 0x29, 0xEF, 0x8E, 0xE5, 0x34, 0xCB, 0xD1, 0x2A, 0xCE, 0x79, 0xC3, 0x9A,
0x0D, 0x36, 0xEA, 0x01, 0xE0, 0xAA, 0x91, 0x20, 0x54, 0xF0, 0x72, 0xD8, 0x1E, 0xC7, 0x89, 0xD2
};
WorldPacket data(SMSG_ADDON_INFO, 4);
for(AddonsList::iterator itr = m_addonsList.begin(); itr != m_addonsList.end(); ++itr)
{
uint8 state = 2; // 2 is sent here
data << uint8(state);
uint8 unk1 = 1; // 1 is sent here
data << uint8(unk1);
if (unk1)
{
uint8 unk2 = (itr->CRC != 0x4c1c776d); // 1 if the CRC differs from the standard addon CRC
data << uint8(unk2); // if 1, then add the addon public signature
if (unk2) // if the CRC is wrong, add the public key (the client needs it)
data.append(tdata, sizeof(tdata));
data << uint32(0);
}
uint8 unk3 = 0; // 0 is sent here
data << uint8(unk3); // use <Addon>\<Addon>.url file or not
if (unk3)
{
// String, 256 (null terminated?)
data << uint8(0);
}
}
m_addonsList.clear();
uint32 count = 0;
data << uint32(count); // BannedAddons count
/*for(uint32 i = 0; i < count; ++i)
{
uint32
string (16 bytes)
string (16 bytes)
uint32
uint32
uint32
}*/
SendPacket(&data);
}
void WorldSession::SetPlayer( Player *plr )
{
_player = plr;
// set m_GUIDLow so it can be used while the player is logged in, and afterwards until m_playerRecentlyLogout is reset
if(_player)
m_GUIDLow = _player->GetGUIDLow();
}
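/// Redirect the client to another world server, authenticating the target address with an HMAC-SHA1 of ip+port keyed by the session key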
void WorldSession::SendRedirectClient(std::string& ip, uint16 port)
{
uint32 ip2 = ACE_OS::inet_addr(ip.c_str());
WorldPacket pkt(SMSG_REDIRECT_CLIENT, 4 + 2 + 4 + 20);
pkt << uint32(ip2); // inet_addr(ipstr)
pkt << uint16(port); // port
pkt << uint32(GetLatency()); // latency-related?
HMACSHA1 sha1(20, m_Socket->GetSessionKey().AsByteArray());
sha1.UpdateData((uint8*)&ip2, 4);
sha1.UpdateData((uint8*)&port, 2);
sha1.Finalize();
pkt.append(sha1.GetDigest(), 20); // hmacsha1(ip+port) w/ sessionkey as seed
SendPacket(&pkt);
}
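/// Invoke the opcode handler, delaying any far teleport it triggers until the handler returns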
void WorldSession::ExecuteOpcode( OpcodeHandler const& opHandle, WorldPacket* packet )
{
// we need to prevent internal far teleports inside handlers, because some handlers perform many steps
// or call code that can unexpectedly trigger far teleports under certain conditions
if (_player)
_player->SetCanDelayTeleport(true);
(this->*opHandle.handler)(*packet);
if (_player)
{
// may in fact not be set for the login opcode, but this does not create problems.
_player->SetCanDelayTeleport(false);
//we should execute delayed teleports only for alive(!) players
//because we don't want player's ghost teleported from graveyard
if (_player->IsHasDelayedTeleport())
_player->TeleportTo(_player->m_teleport_dest, _player->m_teleport_options);
}
if (packet->rpos() < packet->wpos() && sLog.HasLogLevelOrHigher(LOG_LVL_DEBUG))
LogUnprocessedTail(packet);
}
# 0005_auto_20160816_2140.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-17 00:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('turnos', '0004_auto_20160519_0134'),
]
operations = [
migrations.RenameField('Turno', 'asistio', 'no_asistio'),
migrations.RenameField('Turno', 'aviso', 'no_aviso')
]
// Triple.java
package gr.iti.openzoo.pojos;
/**
 *
 * @author Michalis Lazaridis <[email protected]>
*/
public class Triple<L,M,R> {
private final L left;
private final M middle;
private final R right;
public Triple(L left, M middle, R right) {
this.left = left;
this.middle = middle;
this.right = right;
}
public L getLeft() { return left; }
public M getMiddle() { return middle; }
public R getRight() { return right; }
@Override
public int hashCode() { return (left.hashCode() ^ middle.hashCode() ^ right.hashCode()) % Integer.MAX_VALUE; }
@Override
public boolean equals(Object o) {
if (!(o instanceof Triple)) return false;
Triple triplo = (Triple) o;
return this.left.equals(triplo.getLeft()) &&
this.middle.equals(triplo.getMiddle()) &&
this.right.equals(triplo.getRight());
}
@Override
public String toString()
{
return "Triple ( " + getLeft().toString() + ", " + getMiddle().toString() + ", " + getRight().toString() + " )";
}
}
// Search.js
import React from 'react'
import {Observable} from 'rx'
import {TextField} from 'material-ui'
import ThemeManager from 'material-ui/lib/styles/theme-manager';
import MyRawTheme from '../components/Theme.js';
import ThemeDecorator from 'material-ui/lib/styles/theme-decorator';
import {compose} from 'recompose'
import {observeProps, createEventHandler} from 'rx-recompose'
import {clickable} from '../style.css'
import {View} from '../components'
let Search = compose(
// ASDFGHJKL:
ThemeDecorator(ThemeManager.getMuiTheme(MyRawTheme))
,
observeProps(props$ => {
// Create search query observable
let setQuery = createEventHandler()
let query$ = setQuery.share()
query$
// Only search for songs that are not only spaces 😂
.filter(x => x.trim() !== '')
// Only every 300 ms
.debounce(300)
// Get the `doSearch` method from props
.withLatestFrom(props$.pluck('doSearch'), (query, doSearch) => doSearch(query))
// Search for the query
.subscribe(func => {
func()
})
return {
// Pass down function to set the query
setQuery: Observable.just(setQuery),
// Pass down the current query value
query: query$.startWith(''),
// Pass down force-search function when pressing enter
doSearch: props$.pluck('doSearch'),
// Function to start playing song when clicked on
playSong: props$.pluck('playSong'),
// Searchresults to display
searchResults:
Observable.merge(
// Results from the search
props$.pluck('results$') // Get results observable
.distinctUntilChanged() // Only when unique
.flatMapLatest(x => x) // Morph into the results$
.startWith([]) // And set off with a empty array
,
query$
// When query is only spaces
.filter(x => x.trim() === '')
// Reset the results to empty array
.map(() => [])
),
}
})
)(({query, setQuery, searchResults, playSong, doSearch}) => (
<View>
<TextField
hintText="Search for a song! :D"
onChange={(e,value) => setQuery(e.target.value)}
value={query}
onEnterKeyDown={doSearch(query)}
fullWidth={true}
underlineStyle={{borderWidth: 2}}
/>
<View>
{ /* List all the results */ }
{ searchResults.map(result =>
<View
key={result.nid}
className={clickable}
// On click, reset the query and play the song!
onClick={() => {
playSong(result)()
setQuery('')
}}
// Same as setting the text, but more compact
children={`${result.title} - ${result.artist}`}
/>
)}
</View>
</View>
))
export default Search
/* OtherwiseNode.java --
Copyright (C) 2004 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
exception statement from your version. */
package gnu.xml.transform;
import javax.xml.namespace.QName;
import javax.xml.transform.TransformerException;
import org.w3c.dom.Node;
/**
* A template node representing an XSL <code>otherwise</code> instruction.
*
* @author <a href='mailto:[email protected]'>Chris Burdess</a>
*/
final class OtherwiseNode
extends TemplateNode
{
OtherwiseNode(TemplateNode children, TemplateNode next)
{
super(children, next);
}
TemplateNode clone(Stylesheet stylesheet)
{
return new OtherwiseNode((children == null) ? null :
children.clone(stylesheet),
(next == null) ? null :
next.clone(stylesheet));
}
void doApply(Stylesheet stylesheet, QName mode,
Node context, int pos, int len,
Node parent, Node nextSibling)
throws TransformerException
{
if (children != null)
{
children.apply(stylesheet, mode,
context, pos, len,
parent, nextSibling);
}
if (next != null)
{
next.apply(stylesheet, mode,
context, pos, len,
parent, nextSibling);
}
}
public String toString()
{
StringBuffer buf = new StringBuffer(getClass().getName());
buf.append('[');
buf.append(']');
return buf.toString();
}
}
// teststruct.go
package main
import (
"fmt"
"github.com/polariseye/polarserver/common"
"github.com/polariseye/polarserver/common/errorCode"
"github.com/polariseye/polarserver/moduleManage"
)
type testStruct struct {
className string
}
var TestBLL *testStruct
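// init registers this module with the module manager when the package is loaded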
func init() {
TestBLL = NewTestStruct()
moduleManage.RegisterModule(func() (moduleManage.IModule, moduleManage.ModuleType) {
return NewTestStruct(), moduleManage.NormalModule
})
}
// Class name
func (this *testStruct) Name() string {
return this.className
}
func (this *testStruct) InitModule() []error {
fmt.Println("初始化")
return nil
}
func (this *testStruct) CheckModule() []error {
fmt.Println("check")
return nil
}
func (this *testStruct) ConvertModule() []error {
fmt.Println("数据转换")
return nil
}
// Interface call
func (this *testStruct) C_Hello(request *common.RequestModel, d int, name string) *common.ResultModel {
result := common.NewResultModel(errorCode.ClientDataError)
result.Value["Hello"] = name + "_" + this.Name()
result.Value["Extra"] = d
result.SetNormalError(errorCode.Success)
return result
}
func NewTestStruct() *testStruct {
return &testStruct{
className: "TestBLL",
}
}
// lib.rs
#[derive(Debug, PartialEq)]
pub enum NthPrimeError {
WhyAreWeIndexingFromOneInsteadOfZero,
IveMadeATerribleMistake,
}
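/// Returns the nth prime, 1-indexed (nth(1) == Ok(2)); n == 0 is an error.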
pub fn nth(n: usize) -> Result<usize, NthPrimeError> {
if n == 0 {
Err(NthPrimeError::WhyAreWeIndexingFromOneInsteadOfZero)
} else {
let mut ps = vec![2];
let mut q = 3;
while ps.len() < n {
if ps.iter().all(|p| q % p != 0) {
ps.push(q);
}
q += 2;
}
ps.pop().ok_or(NthPrimeError::IveMadeATerribleMistake)
}
}
// SoundCueEditWidget.cpp
//
// This file is part of the Marble Virtual Globe.
//
// This program is free software licensed under the GNU LGPL. You can
// find a copy of this license in LICENSE.txt in the top directory of
// the source code.
//
// Copyright 2013 Mihail Ivchenko <[email protected]>
// Copyright 2014 Sanjiban Bairagya <[email protected]>
// Copyright 2014 Illya Kovalevskyy <[email protected]>
//
#include <QToolButton>
#include <QLabel>
#include <QHBoxLayout>
#include <QLineEdit>
#include <QFileDialog>
#include "SoundCueEditWidget.h"
#include "MarbleWidget.h"
#include "geodata/data/GeoDataSoundCue.h"
#include "GeoDataTypes.h"
#include "MarblePlacemarkModel.h"
namespace Marble {
SoundCueEditWidget::SoundCueEditWidget( const QModelIndex &index, QWidget *parent ) :
QWidget( parent ),
m_index( index ),
m_lineEdit( new QLineEdit ),
m_button( new QToolButton ),
m_button2( new QToolButton )
{
QHBoxLayout *layout = new QHBoxLayout;
layout->setSpacing( 5 );
QLabel* iconLabel = new QLabel;
iconLabel->setPixmap( QPixmap( ":/marble/playback-play.png" ) );
layout->addWidget( iconLabel );

m_lineEdit->setPlaceholderText( "Audio location" );
m_lineEdit->setText( soundCueElement()->href() );
layout->addWidget( m_lineEdit );
m_button2->setIcon( QIcon( ":/marble/document-open.png" ) );
connect(m_button2, SIGNAL(clicked()), this, SLOT(open()));
layout->addWidget( m_button2 );
m_button->setIcon( QIcon( ":/marble/document-save.png" ) );
connect(m_button, SIGNAL(clicked()), this, SLOT(save()));
layout->addWidget( m_button );
setLayout( layout );
}
bool SoundCueEditWidget::editable() const
{
return m_button->isEnabled();
}
void SoundCueEditWidget::setEditable( bool editable )
{
m_button->setEnabled( editable );
}
void SoundCueEditWidget::save()
{
soundCueElement()->setHref( m_lineEdit->text() );
emit editingDone(m_index);
}
void SoundCueEditWidget::open()
{
QString fileName = QFileDialog::getOpenFileName(this, tr("Select sound files..."), QDir::homePath(), tr("Supported Sound Files (*.mp3 *.ogg *.wav)"));
m_lineEdit->setText(fileName);
soundCueElement()->setHref( m_lineEdit->text() );
}
GeoDataSoundCue* SoundCueEditWidget::soundCueElement()
{
GeoDataObject *object = qvariant_cast<GeoDataObject*>(m_index.data( MarblePlacemarkModel::ObjectPointerRole ) );
Q_ASSERT( object );
Q_ASSERT( object->nodeType() == GeoDataTypes::GeoDataSoundCueType );
return static_cast<GeoDataSoundCue*>( object );
}
} // namespace Marble
#include "moc_SoundCueEditWidget.cpp"<|fim▁end|> | QLabel* iconLabel = new QLabel;
iconLabel->setPixmap( QPixmap( ":/marble/playback-play.png" ) );
layout->addWidget( iconLabel );
|
// xhr.js
var xhrGet = function (url, callback) {
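// fire an asynchronous GET request and invoke the callback on the load event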
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.onload = callback;
xhr.send();
};
// comp-2372.component.spec.ts
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { Comp2372Component } from './comp-2372.component';

describe('Comp2372Component', () => {
let component: Comp2372Component;
let fixture: ComponentFixture<Comp2372Component>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ Comp2372Component ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(Comp2372Component);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
});
# deprecation_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecation tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def test_silence(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn():
pass
_fn()
self.assertEqual(1, mock_warning.call_count)
with deprecation.silence():
_fn()
self.assertEqual(1, mock_warning.call_count)
_fn()
self.assertEqual(2, mock_warning.call_count)
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("", instructions)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("07-04-2016", instructions)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, None)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, "")
@test.mock.patch.object(logging, "warning", autospec=True)
def test_no_date(self, mock_warning):
date = None
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed in a future version."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % instructions, _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(
args[0], r"deprecated and will be removed")
self._assert_subset(set(["in a future version", instructions]),
set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
def test_prop_wrong_order(self):
with self.assertRaisesRegexp(
ValueError,
"make sure @property appears before @deprecated in your source code"):
# pylint: disable=unused-variable
class _Object(object):
def __init__(self):
pass
@deprecation.deprecated("2016-07-04", "Instructions.")
@property
def _prop(self):
return "prop_wrong_order"
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
"""prop doc.
Returns:
String.
"""
return "prop_with_doc"
# Assert function docs are properly updated.
self.assertEqual(
"prop doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."<|fim▁hole|> "\n%s"
"\n"
"\nReturns:"
"\n String." % (date, instructions), getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_with_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
return "prop_no_doc"
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_no_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
class DeprecatedArgsTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, None, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, "", "deprecated")
with self.assertRaisesRegexp(ValueError, "argument"):
deprecation.deprecated_args(date, instructions)
def test_deprecated_missing_args(self):
date = "2016-07-04"
instructions = "This is how you update..."
def _fn(arg0, arg1, deprecated=None):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
deprecation.deprecated_args(date, instructions, "missing")(_fn)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_varargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, *deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True, False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_kwargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, **deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, a=True, b=False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_positional_and_named(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "d1", "d2")
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, None, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_positional_and_named_with_ok_vals(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, ("d1", None),
("d2", "my_ok_val"))
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, False, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
# Assert calls with the deprecated arguments don't log warnings if
# the value matches the 'ok_val'.
mock_warning.reset_mock()
self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val"))
self.assertEqual(0, mock_warning.call_count)
class DeprecatedArgValuesTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values("", instructions, deprecated=True)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values(
"07-04-2016", instructions, deprecated=True)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, None, deprecated=True)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, "", deprecated=True)
with self.assertRaisesRegexp(ValueError, "argument", deprecated=True):
deprecation.deprecated_arg_values(date, instructions)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
class DeprecationArgumentsTest(test.TestCase):
def testDeprecatedArgumentLookup(self):
good_value = 3
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value, "val_old",
None), good_value)
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", None, "val_old",
good_value), good_value)
with self.assertRaisesRegexp(ValueError,
"Cannot specify both 'val_old' and 'val_new'"):
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value,
"val_old", good_value),
good_value)
def testRewriteArgumentDocstring(self):
docs = """Add `a` and `b`
Args:
a: first arg
b: second arg
"""
new_docs = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right")
new_docs_ref = """Add `left` and `right`
Args:
left: first arg
right: second arg
"""
self.assertEqual(new_docs, new_docs_ref)
if __name__ == "__main__":
test.main()<|fim▁end|> | "\nInstructions for updating:" |
<|file_name|>imagemagick_util.py<|end_file_name|><|fim▁begin|>from django.conf import settings
from django.http import Http404
import os
import shutil
import time
import re
import urlparse
import urllib
from subprocess import call
from exceptions import ImageMagickException, ImageMagickConversionError, ImageMagickOSFileError
from cache_util import file_hash
from django.db.models import ImageField
from django.db.models.fields.files import ImageFieldFile
#Settings
ALLOWED_PARAMS = getattr(settings, "ALLOWED_PARAMS", "adaptive-resize resize extent gravity strip thumbnail trim quality crop liquid-rescale scale rotate shave unsharp watermark".split())
ERROR_IMAGE_URL = getattr(settings, 'ERROR_IMAGE_URL', '')<|fim▁hole|>IMAGEMAGICK_CONVERT_PATH = getattr(settings, 'IMAGEMAGICK_CONVERT_PATH', 'convert')
IMAGEUTIL_SHORTCUTS = getattr(settings, "IMAGEUTIL_SHORTCUTS", {})
#If a conversion uses more than 10mb of memory, IM will quit (a 5s time limit is available but commented out below).
IMAGEMAGICK_ALWAYS_PASS = getattr(settings, "IMAGEMAGICK_ALWAYS_PASS", "-limit area 10mb") # -limit time 5")
#no settings
_IMAGEUTIL_CACHE_ROOT = os.path.join(settings.MEDIA_ROOT, IMAGEUTIL_CACHE_PATH)
def convert(original_image_path, arg):
"""
Takes the file name (relative to MEDIA_ROOT), and a specification of the conversion.
Returns a URL to retrieve the converted file.
See http://www.imagemagick.org/script/command-line-options.php for the possible options.
Does the conversion, if it's not cached, and caches it in MEDIA_ROOT/IMAGEUTIL_CACHE.
Pseudocode for filter:
1. generate the result filename.
2. does it exist? Yes = return it. No = create it.
3. do the conversion; save the file as the result filename.
@accepts:
original_image_path - string - filename of the image; if the file specified lives outside MEDIA_ROOT ImageMagickException will be raised
arg - string - list of arguments. all arguments must be included in ALLOWED_PARAMS, otherwise, ImageMagickException will be raised
@returns:
string - image url
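Example (hypothetical file and arguments):
    url = convert("photos/cat.jpg", "-resize 100x100 -quality 80")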
"""
try:
# check that all arguments are in ALLOWED_PARAMS
# we assume that any param that actually triggers an action starts with a dash
panic = [a for a in arg.split() if (a.startswith("-") and not a[1:] in ALLOWED_PARAMS)]
if panic:
raise ImageMagickException("One of the arguments is not in a whitelist. List of arguments supplied: %s" % panic)
arg = IMAGEUTIL_SHORTCUTS.get(arg, arg)
if not original_image_path:
raise ImageMagickOSFileError('No file specified')
if isinstance(original_image_path, ImageField):
original_image_path = original_image_path.path
if isinstance(original_image_path, ImageFieldFile):
original_image_path = original_image_path.path
if not (isinstance(original_image_path, str) or isinstance(original_image_path, unicode)):
raise ImageMagickException('Original image path is a %s, but it must be a string or unicode.' % str(type(original_image_path)))
op = os.path.abspath(os.path.join(settings.MEDIA_ROOT, original_image_path))
if not op.startswith(os.path.normpath(settings.MEDIA_ROOT)): # someone's trying to access an image outside MEDIA_ROOT; good luck with that!
raise ImageMagickException("Image not under media root")
if arg == "":
#no processing to do.
return urllib.quote(urlparse.urljoin(settings.MEDIA_URL,os.path.relpath(op, settings.MEDIA_ROOT)))
#generate the cache filename
try:
#this depends on the file existing, so we needn't check elsewhere
ophash = file_hash(op)
except OSError, exc:
raise ImageMagickOSFileError(*exc.args)
try:
foldername, filename = op.rsplit(os.path.sep, 1)
except ValueError:
foldername, filename = '', op
try:
name, extension = filename.rsplit(".", 1)
except ValueError:
raise ImageMagickException("Filename does not include extension")
arg_hash = hash(arg)
destination_filename = "o%sa%s.%s" % (ophash, arg_hash, extension)
rel_destination_folder = os.path.join(IMAGEUTIL_CACHE_PATH, filename)
abs_destination_folder = os.path.join(_IMAGEUTIL_CACHE_ROOT, filename)
rel_destination_file = os.path.join(rel_destination_folder, destination_filename)
abs_destination_file = os.path.join(abs_destination_folder, destination_filename)
url = urllib.quote(urlparse.urljoin(settings.MEDIA_URL, rel_destination_file))
#is it in the cache? then return it
if os.path.exists(abs_destination_file):
os.utime(abs_destination_file, None) #update the modified timestamp (for cache purposes)
return url
if not os.path.exists(abs_destination_folder):
os.makedirs(abs_destination_folder)
# make sure that all supplied arguments are in the whitelist
arg = re.sub("\s+", " ", IMAGEMAGICK_ALWAYS_PASS + " " + arg).strip() #having empty args in seems to break 'convert'
arglist = [IMAGEMAGICK_CONVERT_PATH, op,] + arg.split(' ') + [abs_destination_file,]
try:
status = call(arglist)
except OSError:
raise OSError, "Check if your IMAGEMAGICK_CONVERT_PATH is correct. It is currently set to %s" % IMAGEMAGICK_CONVERT_PATH
if status == 0:
return url
else:
cmd = ' '.join(arglist)
raise ImageMagickException, "Error converting %s: ImageMagick returned status %s (command was '%s')" % (op, status, cmd)
except ImageMagickException, e:
# something went wrong. return a filler image or nothing.
# TODO - log, or process the error somehow.
if settings.DEBUG:
raise e
else:
return urllib.quote(ERROR_IMAGE_URL)
def tidy_cache(age=60*60*24*7): #1 week
"""
Walks the image cache root (MEDIA_ROOT/IMAGEUTIL_CACHE_PATH), deleting all files whose last-modified date is more than `age` seconds ago.
"""
cutoff = time.time()-age #num secs since epoch
for path, folders, files in os.walk(_IMAGEUTIL_CACHE_ROOT):
for f in files:
fullpath = os.path.join(path,f)
if os.path.getmtime(fullpath) < cutoff:
os.remove(fullpath)<|fim▁end|> | IMAGEUTIL_CACHE_PATH = getattr(settings, "IMAGEUTIL_CACHE_PATH", "imageutil_cache/") |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .record import Record,RecordSet,QueryValue<|fim▁hole|><|fim▁end|> | from .heysqlware import * |
<|file_name|>test_url.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
"""
test_url.py
websocket - WebSocket client library for Python
Copyright 2021 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import unittest
sys.path[0:0] = [""]
from websocket._url import get_proxy_info, parse_url, _is_address_in_network, _is_no_proxy_host
class UrlTest(unittest.TestCase):
def test_address_in_network(self):
self.assertTrue(_is_address_in_network('127.0.0.1', '127.0.0.0/8'))
self.assertTrue(_is_address_in_network('127.1.0.1', '127.0.0.0/8'))
self.assertFalse(_is_address_in_network('127.1.0.1', '127.0.0.0/24'))
def testParseUrl(self):
p = parse_url("ws://www.example.com/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/r/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("wss://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://www.example.com:8080/r?key=value")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r?key=value")
self.assertEqual(p[3], True)
self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
p = parse_url("ws://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("wss://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 443)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
class IsNoProxyHostTest(unittest.TestCase):
def setUp(self):
self.no_proxy = os.environ.get("no_proxy", None)
if "no_proxy" in os.environ:
del os.environ["no_proxy"]
def tearDown(self):
if self.no_proxy:
os.environ["no_proxy"] = self.no_proxy
elif "no_proxy" in os.environ:
del os.environ["no_proxy"]
def testMatchAll(self):
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['*']))
self.assertTrue(_is_no_proxy_host("192.168.0.1", ['*']))
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['other.websocket.org', '*']))
os.environ['no_proxy'] = '*'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
self.assertTrue(_is_no_proxy_host("192.168.0.1", None))
os.environ['no_proxy'] = 'other.websocket.org, *'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
def testIpAddress(self):
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.1']))
self.assertFalse(_is_no_proxy_host("127.0.0.2", ['127.0.0.1']))
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['other.websocket.org', '127.0.0.1']))
self.assertFalse(_is_no_proxy_host("127.0.0.2", ['other.websocket.org', '127.0.0.1']))
os.environ['no_proxy'] = '127.0.0.1'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
os.environ['no_proxy'] = 'other.websocket.org, 127.0.0.1'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
def testIpAddressInRange(self):
self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.0/8']))
self.assertTrue(_is_no_proxy_host("127.0.0.2", ['127.0.0.0/8']))
self.assertFalse(_is_no_proxy_host("127.1.0.1", ['127.0.0.0/24']))
os.environ['no_proxy'] = '127.0.0.0/8'
self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
self.assertTrue(_is_no_proxy_host("127.0.0.2", None))
os.environ['no_proxy'] = '127.0.0.0/24'
self.assertFalse(_is_no_proxy_host("127.1.0.1", None))
def testHostnameMatch(self):
self.assertTrue(_is_no_proxy_host("my.websocket.org", ['my.websocket.org']))
self.assertTrue(_is_no_proxy_host("my.websocket.org", ['other.websocket.org', 'my.websocket.org']))
self.assertFalse(_is_no_proxy_host("my.websocket.org", ['other.websocket.org']))
os.environ['no_proxy'] = 'my.websocket.org'
self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
self.assertFalse(_is_no_proxy_host("other.websocket.org", None))
os.environ['no_proxy'] = 'other.websocket.org, my.websocket.org'
self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
def testHostnameMatchDomain(self):
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['.websocket.org']))
self.assertTrue(_is_no_proxy_host("my.other.websocket.org", ['.websocket.org']))
self.assertTrue(_is_no_proxy_host("any.websocket.org", ['my.websocket.org', '.websocket.org']))
self.assertFalse(_is_no_proxy_host("any.websocket.com", ['.websocket.org']))
os.environ['no_proxy'] = '.websocket.org'
self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
self.assertTrue(_is_no_proxy_host("my.other.websocket.org", None))
self.assertFalse(_is_no_proxy_host("any.websocket.com", None))
os.environ['no_proxy'] = 'my.websocket.org, .websocket.org'<|fim▁hole|>
class ProxyInfoTest(unittest.TestCase):
def setUp(self):
self.http_proxy = os.environ.get("http_proxy", None)
self.https_proxy = os.environ.get("https_proxy", None)
self.no_proxy = os.environ.get("no_proxy", None)
if "http_proxy" in os.environ:
del os.environ["http_proxy"]
if "https_proxy" in os.environ:
del os.environ["https_proxy"]
if "no_proxy" in os.environ:
del os.environ["no_proxy"]
def tearDown(self):
if self.http_proxy:
os.environ["http_proxy"] = self.http_proxy
elif "http_proxy" in os.environ:
del os.environ["http_proxy"]
if self.https_proxy:
os.environ["https_proxy"] = self.https_proxy
elif "https_proxy" in os.environ:
del os.environ["https_proxy"]
if self.no_proxy:
os.environ["no_proxy"] = self.no_proxy
elif "no_proxy" in os.environ:
del os.environ["no_proxy"]
def testProxyFromArgs(self):
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128),
("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128),
("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(
get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(
get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
no_proxy=["example.com"], proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
no_proxy=["echo.websocket.org"], proxy_auth=("a", "b")),
(None, 0, None))
def testProxyFromEnv(self):
os.environ["http_proxy"] = "http://localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, None))
os.environ["http_proxy"] = "http://a:b@localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://john%40example.com:P%40SSWORD@localhost:3128/"
os.environ["https_proxy"] = "http://john%40example.com:P%40SSWORD@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("[email protected]", "P@SSWORD")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
os.environ["no_proxy"] = "example1.com,example2.com"
self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, .websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "127.0.0.0/8, 192.168.0.0/16"
self.assertEqual(get_proxy_info("127.0.0.1", False), (None, 0, None))
self.assertEqual(get_proxy_info("192.168.1.1", False), (None, 0, None))
if __name__ == "__main__":
unittest.main()<|fim▁end|> | self.assertTrue(_is_no_proxy_host("any.websocket.org", None)) |
<|file_name|>ProcessQueue.java<|end_file_name|><|fim▁begin|>/**
* Copyright (C) 2010-2013 Alibaba Group Holding Limited
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.rocketmq.client.impl.consumer;
import com.alibaba.rocketmq.client.log.ClientLogger;
import com.alibaba.rocketmq.common.message.MessageConst;
import com.alibaba.rocketmq.common.message.MessageExt;
import com.alibaba.rocketmq.common.protocol.body.ProcessQueueInfo;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
<|fim▁hole|>import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Queue consumption snapshot
*
* @author shijia.wxr<[email protected]>
* @since 2013-7-24
*/
public class ProcessQueue {
public final static long RebalanceLockMaxLiveTime = Long.parseLong(System.getProperty(
"rocketmq.client.rebalance.lockMaxLiveTime", "30000"));
public final static long RebalanceLockInterval = Long.parseLong(System.getProperty(
"rocketmq.client.rebalance.lockInterval", "20000"));
private final static long PullMaxIdleTime = Long.parseLong(System.getProperty(
"rocketmq.client.pull.pullMaxIdleTime", "120000"));
private final Logger log = ClientLogger.getLog();
private final ReadWriteLock lockTreeMap = new ReentrantReadWriteLock();
private final TreeMap<Long, MessageExt> msgTreeMap = new TreeMap<Long, MessageExt>();
private final AtomicLong msgCount = new AtomicLong();
private final Lock lockConsume = new ReentrantLock();
private final TreeMap<Long, MessageExt> msgTreeMapTemp = new TreeMap<Long, MessageExt>();
private final AtomicLong tryUnlockTimes = new AtomicLong(0);
private volatile long queueOffsetMax = 0L;
private volatile boolean dropped = false;
private volatile long lastPullTimestamp = System.currentTimeMillis();
private volatile long lastConsumeTimestamp = System.currentTimeMillis();
private volatile boolean locked = false;
private volatile long lastLockTimestamp = System.currentTimeMillis();
private volatile boolean consuming = false;
private volatile long msgAccCnt = 0;
public boolean isLockExpired() {
boolean result = (System.currentTimeMillis() - this.lastLockTimestamp) > RebalanceLockMaxLiveTime;
return result;
}
public boolean isPullExpired() {
boolean result = (System.currentTimeMillis() - this.lastPullTimestamp) > PullMaxIdleTime;
return result;
}
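/** Batch-inserts freshly pulled messages; returns true if a new consume request should be dispatched for this queue. */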
public boolean putMessage(final List<MessageExt> msgs) {
boolean dispatchToConsume = false;
try {
this.lockTreeMap.writeLock().lockInterruptibly();
try {
int validMsgCnt = 0;
for (MessageExt msg : msgs) {
MessageExt old = msgTreeMap.put(msg.getQueueOffset(), msg);
if (null == old) {
validMsgCnt++;
this.queueOffsetMax = msg.getQueueOffset();
}
}
msgCount.addAndGet(validMsgCnt);
if (!msgTreeMap.isEmpty() && !this.consuming) {
dispatchToConsume = true;
this.consuming = true;
}
if (!msgs.isEmpty()) {
MessageExt messageExt = msgs.get(msgs.size() - 1);
String property = messageExt.getProperty(MessageConst.PROPERTY_MAX_OFFSET);
if (property != null) {
long accTotal = Long.parseLong(property) - messageExt.getQueueOffset();
if (accTotal > 0) {
this.msgAccCnt = accTotal;
}
}
}
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("putMessage exception", e);
}
return dispatchToConsume;
}
public long getMaxSpan() {
try {
this.lockTreeMap.readLock().lockInterruptibly();
try {
if (!this.msgTreeMap.isEmpty()) {
return this.msgTreeMap.lastKey() - this.msgTreeMap.firstKey();
}
} finally {
this.lockTreeMap.readLock().unlock();
}
} catch (InterruptedException e) {
log.error("getMaxSpan exception", e);
}
return 0;
}
public long removeMessage(final List<MessageExt> msgs) {
long result = -1;
final long now = System.currentTimeMillis();
try {
this.lockTreeMap.writeLock().lockInterruptibly();
this.lastConsumeTimestamp = now;
try {
if (!msgTreeMap.isEmpty()) {
result = this.queueOffsetMax + 1;
int removedCnt = 0;
for (MessageExt msg : msgs) {
MessageExt prev = msgTreeMap.remove(msg.getQueueOffset());
if (prev != null) {
removedCnt--;
}
}
msgCount.addAndGet(removedCnt);
if (!msgTreeMap.isEmpty()) {
result = msgTreeMap.firstKey();
}
}
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (Throwable t) {
log.error("removeMessage exception", t);
}
return result;
}
public TreeMap<Long, MessageExt> getMsgTreeMap() {
return msgTreeMap;
}
public AtomicLong getMsgCount() {
return msgCount;
}
public boolean isDropped() {
return dropped;
}
public void setDropped(boolean dropped) {
this.dropped = dropped;
}
public boolean isLocked() {
return locked;
}
public void setLocked(boolean locked) {
this.locked = locked;
}
public void rollback() {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
try {
this.msgTreeMap.putAll(this.msgTreeMapTemp);
this.msgTreeMapTemp.clear();
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("rollback exception", e);
}
}
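/** Commits the messages taken via takeMessags(): clears the temp map and returns the next offset to persist, or -1 on failure. */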
public long commit() {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
try {
Long offset = this.msgTreeMapTemp.lastKey();
msgCount.addAndGet(this.msgTreeMapTemp.size() * (-1));
this.msgTreeMapTemp.clear();
if (offset != null) {
return offset + 1;
}
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("commit exception", e);
}
return -1;
}
public void makeMessageToCosumeAgain(List<MessageExt> msgs) {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
try {
for (MessageExt msg : msgs) {
this.msgTreeMapTemp.remove(msg.getQueueOffset());
this.msgTreeMap.put(msg.getQueueOffset(), msg);
}
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("makeMessageToCosumeAgain exception", e);
}
}
public List<MessageExt> takeMessags(final int batchSize) {
List<MessageExt> result = new ArrayList<MessageExt>(batchSize);
final long now = System.currentTimeMillis();
try {
this.lockTreeMap.writeLock().lockInterruptibly();
this.lastConsumeTimestamp = now;
try {
if (!this.msgTreeMap.isEmpty()) {
for (int i = 0; i < batchSize; i++) {
Map.Entry<Long, MessageExt> entry = this.msgTreeMap.pollFirstEntry();
if (entry != null) {
result.add(entry.getValue());
msgTreeMapTemp.put(entry.getKey(), entry.getValue());
} else {
break;
}
}
}
if (result.isEmpty()) {
consuming = false;
}
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("take Messages exception", e);
}
return result;
}
public void clear() {
try {
this.lockTreeMap.writeLock().lockInterruptibly();
try {
this.msgTreeMap.clear();
this.msgTreeMapTemp.clear();
this.msgCount.set(0);
this.queueOffsetMax = 0L;
} finally {
this.lockTreeMap.writeLock().unlock();
}
} catch (InterruptedException e) {
log.error("rollback exception", e);
}
}
public long getLastLockTimestamp() {
return lastLockTimestamp;
}
public void setLastLockTimestamp(long lastLockTimestamp) {
this.lastLockTimestamp = lastLockTimestamp;
}
public Lock getLockConsume() {
return lockConsume;
}
public long getLastPullTimestamp() {
return lastPullTimestamp;
}
public void setLastPullTimestamp(long lastPullTimestamp) {
this.lastPullTimestamp = lastPullTimestamp;
}
public long getMsgAccCnt() {
return msgAccCnt;
}
public void setMsgAccCnt(long msgAccCnt) {
this.msgAccCnt = msgAccCnt;
}
public long getTryUnlockTimes() {
return this.tryUnlockTimes.get();
}
public void incTryUnlockTimes() {
this.tryUnlockTimes.incrementAndGet();
}
public void fillProcessQueueInfo(final ProcessQueueInfo info) {
try {
this.lockTreeMap.readLock().lockInterruptibly();
if (!this.msgTreeMap.isEmpty()) {
info.setCachedMsgMinOffset(this.msgTreeMap.firstKey());
info.setCachedMsgMaxOffset(this.msgTreeMap.lastKey());
info.setCachedMsgCount(this.msgTreeMap.size());
}
if (!this.msgTreeMapTemp.isEmpty()) {
info.setTransactionMsgMinOffset(this.msgTreeMapTemp.firstKey());
info.setTransactionMsgMaxOffset(this.msgTreeMapTemp.lastKey());
info.setTransactionMsgCount(this.msgTreeMapTemp.size());
}
info.setLocked(this.locked);
info.setTryUnlockTimes(this.tryUnlockTimes.get());
info.setLastLockTimestamp(this.lastLockTimestamp);
info.setDroped(this.dropped);
info.setLastPullTimestamp(this.lastPullTimestamp);
info.setLastConsumeTimestamp(this.lastConsumeTimestamp);
} catch (Exception e) {
} finally {
this.lockTreeMap.readLock().unlock();
}
}
public long getLastConsumeTimestamp() {
return lastConsumeTimestamp;
}
public void setLastConsumeTimestamp(long lastConsumeTimestamp) {
this.lastConsumeTimestamp = lastConsumeTimestamp;
}
}<|fim▁end|> | |
<|file_name|>na_cdot_qtree.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_qtree
short_description: Manage qtrees
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create or destroy Qtrees.
options:
state:
description:
- Whether the specified Qtree should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the Qtree to manage.
required: true
flexvol_name:<|fim▁hole|> description:
- The name of the FlexVol the Qtree should exist on. Required when C(state=present).
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create QTree
na_cdot_qtree:
state: present
name: ansibleQTree
flexvol_name: ansibleVolume
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Rename QTree
na_cdot_qtree:
state: present
name: ansibleQTree
flexvol_name: ansibleVolume
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTQTree(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
flexvol_name=dict(type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['flexvol_name'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.flexvol_name = p['flexvol_name']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_qtree(self):
"""
Checks if the qtree exists.
:return:
True if qtree found
False if qtree is not found
:rtype: bool
"""
qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-info', **{'vserver': self.vserver,
'volume': self.flexvol_name,
'qtree': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
qtree_list_iter.add_child_elem(query)
result = self.server.invoke_successfully(qtree_list_iter,
enable_tunneling=True)
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_qtree(self):
qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-create', **{'volume': self.flexvol_name,
'qtree': self.name})
try:
self.server.invoke_successfully(qtree_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_qtree(self):
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-delete', **{'qtree': path})
try:
self.server.invoke_successfully(qtree_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
exception=traceback.format_exc())
def rename_qtree(self):
path = '/vol/%s/%s' % (self.flexvol_name, self.name)
new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-rename', **{'qtree': path,
'new-qtree-name': new_path})
try:
self.server.invoke_successfully(qtree_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
qtree_exists = False
rename_qtree = False
qtree_detail = self.get_qtree()
if qtree_detail:
qtree_exists = True
if self.state == 'absent':
# Qtree exists, but requested state is 'absent'.
changed = True
elif self.state == 'present':
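# NOTE: this condition compares self.name with itself and can never be true, so the rename branch below is effectively unreachable as written.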
if self.name is not None and not self.name == \
self.name:
changed = True
rename_qtree = True
else:
if self.state == 'present':
# Qtree does not exist, but requested state is 'present'.
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not qtree_exists:
self.create_qtree()
else:
if rename_qtree:
self.rename_qtree()
elif self.state == 'absent':
self.delete_qtree()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTQTree()
v.apply()
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import './index.css';
import React, {Component} from 'react';
import { postToggleDevice } from '../ajax';
export default class SocketDevice extends Component {
constructor() {
super();
this.state = {clicked: false, device: {}};
this.clicked = this.clicked.bind(this);
}
componentWillMount() {
var device = this.props.device;
if (device.enabled) {
this.setState({clicked: true});
}
this.setState({device: device});
}
clicked() {
var nextState = !this.state.clicked;
var id = this.state.device.id;
if (this.props.valueControl) {
this.props.valueControl(id, 'enabled', nextState);<|fim▁hole|> }
}
toggle() {
var id = this.state.device.id;
postToggleDevice(id, function (device) {
this.setState({clicked: device.enabled, device: device});
}.bind(this));
}
render() {
var name = this.state.device.name;
var classes = 'icon-tinted' + (this.state.clicked ? ' active' : '');
return (<div className="m-t-1">
<h4>
<a className={classes} onClick={this.clicked}><i className="fa fa-plug fa-lg"/></a> {name}
</h4>
</div>);
}
}
SocketDevice.propTypes = {
device: React.PropTypes.object.isRequired,
valueControl: React.PropTypes.func.isRequired
};<|fim▁end|> | this.setState({clicked: nextState});
} else {
this.toggle(id); |
<|file_name|>edition-keywords-2018-2018-expansion.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// edition:2018
// aux-build:edition-kw-macro-2018.rs
#[macro_use]
extern crate edition_kw_macro_2018;<|fim▁hole|>mod one_async {
produces_async! {} //~ ERROR expected identifier, found reserved keyword `async`
}
mod two_async {
produces_async_raw! {} // OK
}<|fim▁end|> | |
<|file_name|>coerce-reborrow-imm-ptr-rcvr.rs<|end_file_name|><|fim▁begin|>struct SpeechMaker {
speeches: uint
}
pub impl SpeechMaker {
fn how_many(&const self) -> uint { self.speeches }
}
fn foo(speaker: &const SpeechMaker) -> uint {
speaker.how_many() + 33
}
pub fn main() {
let mut lincoln = SpeechMaker {speeches: 22};
assert!(foo(&const lincoln) == 55);<|fim▁hole|><|fim▁end|> | } |
<|file_name|>AgentSpecial.py<|end_file_name|><|fim▁begin|>from Personne import *
class AgentSpecial(Personne):
"""
Exemple d'héritage simple<|fim▁hole|> --->>>> l'héritage multiple est possible
-> exceptions are inherited as well
Class defining a special agent.
It inherits from the Personne class"""
def __init__(self, nom, matricule):
"""Un agent se définit par son nom et son matricule"""
self.nom = nom
self.matricule = matricule
#Personne.__init__(self, nom)
def __str__(self):
"""Méthode appelée lors d'une conversion de l'objet en chaîne"""
return "Agent {0}, matricule {1}".format(self.nom, self.matricule)<|fim▁end|> | |
<|file_name|>pie-chart.component.ts<|end_file_name|><|fim▁begin|>// Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Directive for pie chart visualization.
*/
angular.module('oppia').component('pieChart', {
bindings: {
// A read-only array representing the table of chart data.
data: '&',
// A read-only object containing several chart options. This object
// should have the following keys: title, pieHole, pieSliceTextStyleColor,
// pieSliceBorderColor, chartAreaWidth, colors, height, legendPosition, width.
options: '&'
},
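// Typical usage in a template (attribute values are illustrative):
//   <pie-chart data="$ctrl.chartData" options="$ctrl.chartOptions"></pie-chart>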
controller: ['$element', '$scope', 'WindowDimensionsService', function(
$element, $scope, WindowDimensionsService) {
var ctrl = this;
ctrl.resizeSubscription = null;
ctrl.$onInit = function() {
if (!$.isArray(ctrl.data())) {
return;
}
var options = ctrl.options();
var chart = null;
var redrawChart = function() {
if (chart !== null) {
chart.draw(google.visualization.arrayToDataTable(ctrl.data()), {
title: options.title,
pieHole: options.pieHole,
pieSliceTextStyle: {
color: options.pieSliceTextStyleColor,
},
pieSliceBorderColor: options.pieSliceBorderColor,<|fim▁hole|> width: options.chartAreaWidth
},
colors: options.colors,
height: options.height,
legend: {
position: options.legendPosition || 'none'
},
width: options.width
});
}
};
// Need to wait for load statement in editor template to finish.
// https://stackoverflow.com/questions/42714876/
google.charts.setOnLoadCallback(function() {
if (!chart) {
chart = new google.visualization.PieChart($element[0]);
redrawChart();
$scope.$applyAsync();
}
});
$scope.$watch('data()', redrawChart);
ctrl.resizeSubscription = WindowDimensionsService.getResizeEvent()
.subscribe(evt => {
redrawChart();
$scope.$applyAsync();
});
};
ctrl.$onDestroy = function() {
if (ctrl.resizeSubscription) {
ctrl.resizeSubscription.unsubscribe();
}
};
}]
});<|fim▁end|> | pieSliceText: 'none',
chartArea: {
left: options.left, |
<|file_name|>cmake.py<|end_file_name|><|fim▁begin|># Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
CMAKE_INSTALL_MANIFEST_FILENAME = 'install_manifest.txt'
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
class CMakeIOBufferProtocol(IOBufferProtocol):
"""An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def abspath(self, groups):
"""Group filter that turns source-relative paths into absolute paths."""
return (groups[0] if groups[0].startswith(os.sep) else os.path.join(self.source_path, groups[0]),) + groups[1:]
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
self.suppress_stdout = suppress_stdout
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''<|fim▁hole|> # - output formatting line (subs captured groups)
# - functor which filters captured groups
filters = [
(r'^-- :(.+)', '@{cf}--@| :@{yf}{}@|', None),
(r'^-- (.+)', '@{cf}--@| {}', None),
(r'CMake Error at (.+):(.+)', '@{rf}@!CMake Error@| at {}:{}', self.abspath),
(r'CMake Warning at (.+):(.+)', '@{yf}@!CMake Warning@| at {}:{}', self.abspath),
(r'CMake Warning (dev) at (.+):(.+)', '@{yf}@!CMake Warning (dev)@| at {}:{}', self.abspath),
(r'(?i)(warning.*)', '@{yf}{}@|', None),
(r'(?i)ERROR:(.*)', '@!@{rf}ERROR:@|{}@|', None),
(r'Call Stack \(most recent call first\):(.*)', '@{cf}Call Stack (most recent call first):@|{}', None),
]
self.filters = [(re.compile(p), r, f) for (p, r, f) in filters]
def on_stdout_received(self, data):
if not self.suppress_stdout:
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
# of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(line) for line in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path, suppress_stdout=False):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, suppress_stdout, *args,
**kwargs)
return init_proxy
return factory
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: a single newline-terminated line from `cmake` that needs coloring.
:type line: str
"""
# return line
cline = sanitize(line).rstrip()
if len(cline.strip()) > 0:
for p, r, f in self.filters:
match = p.match(cline)
if match is not None:
cline = fmt(r, reset=False)
if f is not None:
cline = cline.format(*f(match.groups()))
else:
cline = cline.format(*match.groups())
break
return cline + '\n'
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progress prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
self.send_progress(data)
def send_progress(self, data):
"""Parse CMake Make completion progress"""
progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
class CMakeMakeRunTestsIOBufferProtocol(CMakeMakeIOBufferProtocol):
"""An IOBufferProtocol which parses the output of `make run_tests`."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
# Line formatting filters
# Each is a 2-tuple:
# - regular expression
# - output formatting line
self.filters = [
(re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
]
self.in_test_output = False
self.verbose = verbose
def on_stdout_received(self, data):
self.send_progress(data)
data = self._decode(data)
if data.startswith('-- run_tests.py: execute command'):
self.in_test_output = True
elif data.startswith('-- run_tests.py: verify result'):
self.in_test_output = False
if self.verbose or self.in_test_output:
colored = self.colorize_run_tests(data)
super(CMakeMakeRunTestsIOBufferProtocol, self).on_stdout_received(colored.encode())
def colorize_run_tests(self, line):
cline = sanitize(line).rstrip()
for p, r in self.filters:
if p.match(cline):
lines = [fmt(r).format(line) for line in cline.splitlines()]
cline = '\n'.join(lines)
return cline + '\n'
@classmethod
def factory_factory(cls, verbose):
"""Factory factory for constructing protocols that know the verbosity."""
def factory(label, job_id, stage_label, event_queue, log_path):
# factory is called by catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs)
return init_proxy
return factory
def get_installed_files(path):
"""Get a set of files installed by a CMake package as specified by an
install_manifest.txt in a given directory."""
install_manifest_path = os.path.join(
path,
CMAKE_INSTALL_MANIFEST_FILENAME)
installed_files = set()
if os.path.exists(install_manifest_path):
with open(install_manifest_path) as f:
installed_files = set([line.strip() for line in f.readlines()])
return installed_files<|fim▁end|> |
# Line formatting filters
# Each is a 3-tuple:
# - regular expression (with captured groups) |
<|file_name|>model_post_universe_names_not_found_easyjson.go<|end_file_name|><|fim▁begin|>// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
package esi
import (
json "encoding/json"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// suppress unused package warning
var (
_ *json.RawMessage
_ *jlexer.Lexer
_ *jwriter.Writer
_ easyjson.Marshaler
)
func easyjson86b0732DecodeGithubComAntihaxGoesiEsi(in *jlexer.Lexer, out *PostUniverseNamesNotFoundList) {
isTopLevel := in.IsStart()
if in.IsNull() {
in.Skip()
*out = nil
} else {
in.Delim('[')
if *out == nil {
if !in.IsDelim(']') {
*out = make(PostUniverseNamesNotFoundList, 0, 4)
} else {
*out = PostUniverseNamesNotFoundList{}
}
} else {
*out = (*out)[:0]
}
for !in.IsDelim(']') {
var v1 PostUniverseNamesNotFound
(v1).UnmarshalEasyJSON(in)
*out = append(*out, v1)
in.WantComma()
}
in.Delim(']')
}
if isTopLevel {
in.Consumed()
}
}<|fim▁hole|> out.RawByte('[')
for v2, v3 := range in {
if v2 > 0 {
out.RawByte(',')
}
(v3).MarshalEasyJSON(out)
}
out.RawByte(']')
}
}
// MarshalJSON supports json.Marshaler interface
func (v PostUniverseNamesNotFoundList) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson86b0732EncodeGithubComAntihaxGoesiEsi(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v PostUniverseNamesNotFoundList) MarshalEasyJSON(w *jwriter.Writer) {
easyjson86b0732EncodeGithubComAntihaxGoesiEsi(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *PostUniverseNamesNotFoundList) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson86b0732DecodeGithubComAntihaxGoesiEsi(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *PostUniverseNamesNotFoundList) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson86b0732DecodeGithubComAntihaxGoesiEsi(l, v)
}
func easyjson86b0732DecodeGithubComAntihaxGoesiEsi1(in *jlexer.Lexer, out *PostUniverseNamesNotFound) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "error":
out.Error_ = string(in.String())
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjson86b0732EncodeGithubComAntihaxGoesiEsi1(out *jwriter.Writer, in PostUniverseNamesNotFound) {
out.RawByte('{')
first := true
_ = first
if in.Error_ != "" {
const prefix string = ",\"error\":"
first = false
out.RawString(prefix[1:])
out.String(string(in.Error_))
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v PostUniverseNamesNotFound) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjson86b0732EncodeGithubComAntihaxGoesiEsi1(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v PostUniverseNamesNotFound) MarshalEasyJSON(w *jwriter.Writer) {
easyjson86b0732EncodeGithubComAntihaxGoesiEsi1(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *PostUniverseNamesNotFound) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson86b0732DecodeGithubComAntihaxGoesiEsi1(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *PostUniverseNamesNotFound) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson86b0732DecodeGithubComAntihaxGoesiEsi1(l, v)
}<|fim▁end|> | func easyjson86b0732EncodeGithubComAntihaxGoesiEsi(out *jwriter.Writer, in PostUniverseNamesNotFoundList) {
if in == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 {
out.RawString("null")
} else { |
<|file_name|>3vilTwin-Attacker.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.7
#The MIT License (MIT)<|fim▁hole|>#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sys import argv,exit
from os import getuid
from PyQt4.QtGui import QApplication,QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
from Modules.utils import Refactor
def ExecRootApp():
check_dependencies()
root = QApplication(argv)
app = Initialize()
app.setWindowIcon(QIcon('rsc/icon.ico'))
app.center(),app.show()
exit(root.exec_())
if __name__ == '__main__':
if not getuid() == 0:
app2 = QApplication(argv)
priv = frm_privelege()
priv.setWindowIcon(QIcon('rsc/icon.ico'))
priv.show(),app2.exec_()
exit(Refactor.threadRoot(priv.Editpassword.text()))
ExecRootApp()<|fim▁end|> | |
<|file_name|>fleet.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-
#
#
# Copyright (C) 2015 Clear ICT Solutions <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import api, fields, models
class FleetVehicle(models.Model):
_inherit = 'fleet.vehicle'
@api.model
def _get_user_department(self):
res = self.env['hr.department']
for employee in self.env.user.employee_ids:
if employee.department_id:
res = employee.department_id
break
return res
# Fields
#<|fim▁hole|> department = fields.Many2one('hr.department', default=_get_user_department)<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
pytest-pylint
=============
Plugin for py.test for doing pylint tests
"""
from setuptools import setup<|fim▁hole|> long_description=open("README.rst").read(),
license="MIT",
version='0.3.0',
author='Carson Gee',
author_email='[email protected]',
url='https://github.com/carsongee/pytest-pylint',
py_modules=['pytest_pylint'],
entry_points={'pytest11': ['pylint = pytest_pylint']},
install_requires=['pytest>=2.4', 'pylint', 'six'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)<|fim▁end|> |
setup(
name='pytest-pylint',
description='pytest plugin to check source code with pylint', |
<|file_name|>msp430.py<|end_file_name|><|fim▁begin|>#
# This file is part of GreatFET
#
#
# FIXME: rewrite to be a compliant GreatFET JTAGDevice.
#
from itertools import chain
from ..interface import GreatFETInterface
from ..programmer import GreatFETProgrammer
class JTAG_MSP430(GreatFETProgrammer):
MSP430_ident = 0x00
def __init__(self, board):
"""
Initialize a new MSP430 JTAG instance.
Args:
board -- The GreatFET board connected to the target.
"""
self.board = board
def start(self):
"""Initialise the JTAG hardware and target device."""
self.JTAGID = self.board.apis.jtag_msp430.start()
if(not (self.JTAGID==0x89 or self.JTAGID==0x91)):
#Try once more
self.JTAGID = self.board.apis.jtag_msp430.start()
if self.JTAGID in (0x89, 0x91):
self.halt_cpu()
return self.JTAGID
def stop(self):
"""Stop debugging."""
self.board.apis.jtag_msp430.stop()
def peek(self, address, length=2):
"""
Read a word at an address.
Args:
address -- The memory address to read from the target.
length -- Number of bytes to read.
"""
return self.board.apis.jtag_msp430.read_mem(address, length)
def peek_block(self, address, block_size=0x400):
"""Grab a large block from an SPI Flash ROM."""
data = self.peek(address, block_size)
byte_pairs = [(x&0xFF, (x&0xFF00)>>8) for x in data]
data_bytes = bytes(chain.from_iterable(byte_pairs))
return data_bytes
def poke(self, address, value):
"""
Write the contents of memory at an address.
Args:
address -- The memory address to be written.
value -- Value to write to location.
"""
return self.board.apis.jtag_msp430.write_mem(address, value)
def poke_flash_block(self, address, data):
"""
Write the contents of flash memory at an address.
Args:
address -- The memory address to be written.
data -- Words to write to flash
"""
value = self.board.apis.jtag_msp430.write_flash(address, data, timeout=30000)
return value
def poke_flash(self, address, value):
"""
Write a single word to flash at an address.
Args:
address -- The memory address to be written.
value -- Valuse to write to location
"""
value = self.poke_flash_block(address, (value,))
return value
def set_secret(self,value):
"""Set a secret word for later retreival. Used by glitcher."""
self.poke_flash(0xFFFE, value)
def get_secret(self):
"""Get a secret word. Used by glitcher."""
return self.peek(0xfffe)
def halt_cpu(self):
"""Halt the CPU."""
self.board.apis.jtag_msp430.halt_cpu()
def release_cpu(self):<|fim▁hole|> """Resume the CPU."""
        self.board.apis.jtag_msp430.release_cpu()
def set_instruction_fetch(self):
"""Set the instruction fetch mode."""
self.board.apis.jtag_msp430.set_instruction_fetch()
def ident(self):
"""Fetch self-identification word from 0x0FF0 as big endian."""
if self.MSP430_ident == 0x00:
if self.JTAGID == 0x89:
i=self.peek(0x0ff0)
if self.JTAGID == 0x91 :
i=self.peek(0x1A04)
if len(i) >= 1:
self.MSP430_ident = ((i[0]&0xFF00)>>8)+((i[0]&0xFF)<<8)
return self.MSP430_ident
devices = {
#MSP430F2xx
0xf227: "MSP430F22xx",
0xf213: "MSP430F21x1",
0xf249: "MSP430F24x",
0xf26f: "MSP430F261x",
0xf237: "MSP430F23x0",
0xf201: "MSP430F201x",
#Are G's and F's distinct?
0x2553: "MSP430G2553",
#MSP430F1xx
0xf16c: "MSP430F161x",
0xf149: "MSP430F13x", #or f14x(1)
0xf112: "MSP430F11x", #or f11x1
0xf143: "MSP430F14x",
0xf112: "MSP430F11x", #or F11x1A
0xf123: "MSP430F1xx", #or F123x
0x1132: "MSP430F1122", #or F1132
0x1232: "MSP430F1222", #or F1232
0xf169: "MSP430F16x",
#MSP430F4xx
0xF449: "MSP430F43x", #or F44x
0xF427: "MSP430FE42x", #or FW42x, F415, F417
0xF439: "MSP430FG43x",
0xf46f: "MSP430FG46xx", #or F471xx
0xF413: "MSP430F413", #or maybe others.
}
def ident_string(self):
"""Grab model string."""
return self.devices.get(self.ident())
def erase_flash(self):
"""Erase MSP430 flash memory."""
self.board.apis.jtag_msp430.erase_flash()
def erase_info(self):
"""Erase MSP430 info flash."""
self.board.apis.jtag_msp430.erase_info()
def set_pc(self, pc):
"""Set the program counter."""
self.board.apis.jtag_msp430.set_pc(pc)
def set_reg(self,reg,val):
"""Set a register."""
self.board.apis.jtag_msp430.set_reg(reg, val)
def get_reg(self,reg):
"""Get a register."""
return self.board.apis.jtag_msp430.get_reg(reg)
def run(self):
"""Reset the MSP430 to run on its own."""
self.board.apis.jtag_msp430.release_cpu()
def dump_bsl(self):
self.dump_memory(0xC00, 0xfff)
def dump_all_memory(self):
self.dump_memory(0x200, 0xffff)
def dump_memory(self, begin, end):
i=begin
while i<end:
print("%04x %04x" % (i, self.peek(i)))
i+=2<|fim▁end|> | |
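# A minimal, hedged usage sketch of the class above; the `board` object and
# the exact ident values are assumptions taken from the code, not verified
# against hardware:
#
#   jtag = JTAG_MSP430(board)
#   if jtag.start() in (0x89, 0x91):       # JTAG ID reported by start()
#       print(jtag.ident_string())         # e.g. "MSP430F22xx"
#       word = jtag.peek(0x0ff0)           # read one word from memory
#       jtag.run()                         # release the CPU again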
<|file_name|>partial_cmp.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
// macro_rules! e {
// ($e:expr) => { $e }
// }
// macro_rules! tuple_impls {
// ($(
// $Tuple:ident {
// $(($idx:tt) -> $T:ident)+
// }
// )+) => {
// $(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Clone),+> Clone for ($($T,)+) {
// fn clone(&self) -> ($($T,)+) {
// ($(e!(self.$idx.clone()),)+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:PartialEq),+> PartialEq for ($($T,)+) {
// #[inline]
// fn eq(&self, other: &($($T,)+)) -> bool {
// e!($(self.$idx == other.$idx)&&+)
// }
// #[inline]
// fn ne(&self, other: &($($T,)+)) -> bool {
// e!($(self.$idx != other.$idx)||+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Eq),+> Eq for ($($T,)+) {}
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+) {
// #[inline]
// fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
// lexical_partial_cmp!($(self.$idx, other.$idx),+)
// }
// #[inline]
// fn lt(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(lt, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn le(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(le, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn ge(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(ge, $(self.$idx, other.$idx),+)
// }
// #[inline]
// fn gt(&self, other: &($($T,)+)) -> bool {
// lexical_ord!(gt, $(self.$idx, other.$idx),+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Ord),+> Ord for ($($T,)+) {
// #[inline]
// fn cmp(&self, other: &($($T,)+)) -> Ordering {
// lexical_cmp!($(self.$idx, other.$idx),+)
// }
// }
//
// #[stable(feature = "rust1", since = "1.0.0")]
// impl<$($T:Default),+> Default for ($($T,)+) {
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// fn default() -> ($($T,)+) {
// ($({ let x: $T = Default::default(); x},)+)
// }
// }
// )+
// }
// }
// // Constructs an expression that performs a lexical ordering using method $rel.
// // The values are interleaved, so the macro invocation for
// // `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, a1, b1, a2, b2,
// // a3, b3)` (and similarly for `lexical_cmp`)
// macro_rules! lexical_ord {
// ($rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// if $a != $b { lexical_ord!($rel, $a, $b) }
// else { lexical_ord!($rel, $($rest_a, $rest_b),+) }
// };
// ($rel: ident, $a:expr, $b:expr) => { ($a) . $rel (& $b) };
// }
// macro_rules! lexical_partial_cmp {
// ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// match ($a).partial_cmp(&$b) {
// Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
// ordering => ordering
// }
// };
// ($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
// }
// macro_rules! lexical_cmp {
// ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
// match ($a).cmp(&$b) {
// Equal => lexical_cmp!($($rest_a, $rest_b),+),
// ordering => ordering
// }
// };
// ($a:expr, $b:expr) => { ($a).cmp(&$b) };
// }
// tuple_impls! {
// Tuple1 {
// (0) -> A
// }
// Tuple2 {
// (0) -> A
// (1) -> B
// }
// Tuple3 {
// (0) -> A
// (1) -> B
// (2) -> C
// }
// Tuple4 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// }
// Tuple5 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// }
// Tuple6 {<|fim▁hole|> // (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// }
// Tuple7 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// }
// Tuple8 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// }
// Tuple9 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// }
// Tuple10 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// }
// Tuple11 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// (10) -> K
// }
// Tuple12 {
// (0) -> A
// (1) -> B
// (2) -> C
// (3) -> D
// (4) -> E
// (5) -> F
// (6) -> G
// (7) -> H
// (8) -> I
// (9) -> J
// (10) -> K
// (11) -> L
// }
// }
#[cfg(test)]
mod tests {
use core::cmp::Ordering::{self, Less, Equal, Greater};
macro_rules! partial_cmp_test {
(
$($T:ident)+
) => (
{
let left: ($($T,)+) = ($($T::default(),)+);
let right: ($($T,)+) = ($($T::default() + 1 as $T,)+);
let result: Option<Ordering> = left.partial_cmp(&right);
match result {
Some(v) => assert_eq!(v, Less),
None => assert!(false)
}
}
{
let left: ($($T,)+) = ($($T::default(),)+);
let right: ($($T,)+) = ($($T::default(),)+);
let result: Option<Ordering> = left.partial_cmp(&right);
match result {
Some(v) => assert_eq!(v, Equal),
None => assert!(false)
}
}
{
let left: ($($T,)+) = ($($T::default() + 1 as $T,)+);
let right: ($($T,)+) = ($($T::default(),)+);
let result: Option<Ordering> = left.partial_cmp(&right);
match result {
Some(v) => assert_eq!(v, Greater),
None => assert!(false)
}
}
)
}
type A = u8;
type B = u16;
type C = u32;
type D = u64;
type E = usize;
type F = i8;
type G = i16;
type H = i32;
#[test]
fn partial_cmp_test1() {
partial_cmp_test! { A B C D E F G H };
}
}<|fim▁end|> | // (0) -> A |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
Caching framework.
This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
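# Illustrative round trip of the parser above (a sketch, not a doctest):
#   parse_backend_uri('memcached://127.0.0.1:11211/?timeout=30')
#   -> ('memcached', '127.0.0.1:11211', {'timeout': '30'})
# Note that params stay strings; each backend coerces them as needed.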
if not settings.CACHES:
import warnings
warnings.warn(
"settings.CACHE_* is deprecated; use settings.CACHES instead.",
PendingDeprecationWarning
)
# Mapping for new-style cache backend api
backend_classes = {<|fim▁hole|> 'dummy': 'dummy.DummyCache',
}
engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
if engine in backend_classes:
engine = 'django.core.cache.backends.%s' % backend_classes[engine]
defaults = {
'BACKEND': engine,
'LOCATION': host,
}
defaults.update(params)
settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
try:
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)<|fim▁end|> | 'memcached': 'memcached.CacheClass',
'locmem': 'locmem.LocMemCache',
'file': 'filebased.FileBasedCache',
'db': 'db.DatabaseCache', |
<|file_name|>auth_routes_test.js<|end_file_name|><|fim▁begin|>/* test/auth_route_spec.js -- test Authentication helper
* Copyright 2014 Sergei Ianovich
*
* Licensed under AGPL-3.0 or later, see LICENSE
* Process Control Service Web Interface
*/
var expect = require('expect.js');
var Routes = require('../routes/_auth');
var User = require('../models/user');
describe('Authentication helper', function() {
describe("#authenticate", function() {
describe("when request for not html page", function() {
it("should send 401 if session is not inited", function(done) {
var req = { session: {}, url: "someurl", headers: {accept: 'application/json'} },
res = { send: function(code) {
expect(code).to.eql(401);
done();
}};
Routes.authenticate(req, res, null);
});
<|fim▁hole|> done();
}};
Routes.authenticate(req, res, null);
});
});
describe("when request for html page", function() {
it("should redirect to signin if session is not inited", function(done) {
var req = { session: {}, url: "someurl", headers: {accept: 'application/html'} },
res = { redirect: function(url) {
expect(url).to.eql("/signin");
done();
}};
Routes.authenticate(req, res, null);
});
it("should reload if user is not found", function(done) {
var req = { session: { operator: 2220 }, headers: {accept: 'application/html'} },
res = { redirect: function(url) {
expect(url).to.eql('/signin');
done();
}};
Routes.authenticate(req, res, null);
});
});
it("should assign operator", function(done) {
var user = null;
var userAttrs = {
name: "Example User",
email: "[email protected]",
password: 'password',
confirmation: 'password',
};
(new User(userAttrs)).save(function(err, user) {
var req = { session: { operator: user.email } },
res = { locals: { } },
next = function() {
expect(res.locals.operator.toJSON()).to.eql(user.toJSON());
expect(req.operator.toJSON()).to.eql(user.toJSON());
done();
};
Routes.authenticate(req, res, next);
});
});
});
});<|fim▁end|> | it("should send 401 if user is not found", function(done) {
var req = { session: { operator: 2220 }, headers: {accept: 'application/json'} },
res = { send: function(code) {
expect(code).to.eql(401); |
<|file_name|>reqparse_fixed_type.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from datetime import datetime, date
import six
def fix_number(target_type):
return lambda value: None if isinstance(value, (str, six.text_type)) and len(value) == 0 else target_type(value)<|fim▁hole|>fixed_date = lambda time_str: date.fromtimestamp(time_str)
fixed_int = fix_number(int)
fixed_float = fix_number(float)<|fim▁end|> |
fixed_datetime = lambda time_str: datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S') |
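# A hedged sketch of the intended coercion behaviour (not executed here):
# empty strings become None, anything else goes through the target type.
#   fixed_int('')      # -> None
#   fixed_int('42')    # -> 42
#   fixed_float('1.5') # -> 1.5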
<|file_name|>modelos.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb<|fim▁hole|>class Escravo(Node):
name = ndb.StringProperty(required=True)
age = ndb.IntegerProperty()
birth = ndb.DateProperty(auto_now=True)
price = ndb.FloatProperty()
estatura = ndb.FloatProperty()
class DonoArco(Arc):
destination = ndb.KeyProperty(Escravo, required=True)<|fim▁end|> | from gaegraph.model import Node, Arc
|
<|file_name|>app.animations.ts<|end_file_name|><|fim▁begin|>import { animate, AnimationEntryMetadata, state, style, transition, trigger } from '@angular/core';
// Component transition animations
export const fadeAnimation: AnimationEntryMetadata =
trigger('routeAnimation', [
state('*',
style({
opacity: 1,
}),
),
transition(':enter', [
style({
opacity: 0,
}),
animate('0.3s ease-in'),
]),
transition(':leave', [
animate('0.5s ease-out', style({
opacity: 0,
})),
]),
]);
export const slideInLeftAnimation: AnimationEntryMetadata =
trigger('routeAnimation', [
state('*',
style({
opacity: 1,
transform: 'translateX(0)',
}),
),
transition(':enter', [
style({
opacity: 0,
transform: 'translateX(-100%)',
}),
animate('0.3s ease-in'),
]),
transition(':leave', [
animate('0.5s ease-out', style({
opacity: 0,
transform: 'translateX(100%)',
})),
]),
]);
export const slideInDownAnimation: AnimationEntryMetadata =
trigger('routeAnimation', [
state('*',
style({
opacity: 1,
transform: 'translateY(0)',
}),
),
transition(':enter', [
style({
opacity: 0,
transform: 'translateY(-100%)',
}),<|fim▁hole|> transition(':leave', [
animate('0.5s ease-out', style({
opacity: 0,
transform: 'translateY(100%)',
})),
]),
]);<|fim▁end|> | animate('0.3s ease-in'),
]), |
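// A hedged usage sketch; the component below is invented for illustration.
// The host binding must reference the 'routeAnimation' trigger name used above:
//
//   @Component({
//     selector: 'app-hero-list',
//     animations: [slideInLeftAnimation],
//     host: { '[@routeAnimation]': 'true' },
//   })
//   export class HeroListComponent {}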
<|file_name|>identify.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of overview archive.
# Copyright © 2015 seamus tuohy, <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT<|fim▁hole|>
# identification
from os import path
from os.path import abspath
from urllib.parse import urlparse
from urllib.request import urlopen
import magic
from urllib.error import HTTPError
# logging
import logging
log = logging.getLogger("oa.{0}".format(__name__))
def filetype(file_path):
if path.exists(file_path) and path.isfile(file_path):
try:
file_type = magic.from_file(abspath(file_path), mime=True)
except IOError:
log.error("{0} is not a valid file".format(file_path))
raise IOError("{0} is not a valid file".format(file_path))
else:
log.error("{0} is not a valid path to a file".format(file_path))
raise IOError("{0} is not a valid path to a file".format(file_path))
log.debug("filetype for {0} identified as {1}".format(file_path, file_type))
return file_type
def is_url(link):
try:
site = urlopen(link)
return True
except (ValueError, HTTPError):
return False
return False
def is_archive(link):
try:
parsed_url = urlparse(link)
if parsed_url.netloc == 'web.archive.org':
return True
except ValueError:
return False
return False<|fim▁end|> | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. |
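# A hedged usage sketch of the helpers above; paths, URLs and the exact
# return type of python-magic are illustrative assumptions:
#
#   filetype('/tmp/report.pdf')                    # -> a MIME type such as 'application/pdf'
#   is_url('https://example.com')                  # True when the URL opens
#   is_archive('https://web.archive.org/web/1/x')  # True for archive.org links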
<|file_name|>highlighter.hpp<|end_file_name|><|fim▁begin|>/*
SuperCollider Qt IDE
Copyright (c) 2012 Jakob Leben & Tim Blechmann
http://www.audiosynth.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software<|fim▁hole|>
#ifndef SCIDE_SC_SYNTAX_HIGHLIGHTER_HPP_INCLUDED
#define SCIDE_SC_SYNTAX_HIGHLIGHTER_HPP_INCLUDED
#include "tokens.hpp"
#include <QSyntaxHighlighter>
#include <QVector>
namespace ScIDE {
namespace Settings { class Manager; }
class Main;
enum SyntaxFormat
{
PlainFormat,
ClassFormat,
KeywordFormat,
BuiltinFormat,
PrimitiveFormat,
SymbolFormat,
StringFormat,
CharFormat,
NumberFormat,
EnvVarFormat,
CommentFormat,
FormatCount
};
struct SyntaxRule
{
SyntaxRule(): type(Token::Unknown) {}
SyntaxRule( Token::Type t, const QString &s ): type(t), expr(s) {}
Token::Type type;
QRegExp expr;
};
class SyntaxHighlighterGlobals : public QObject
{
Q_OBJECT
public:
SyntaxHighlighterGlobals( Main *, Settings::Manager * settings );
inline const QTextCharFormat * formats() const
{
return mFormats;
}
inline const QTextCharFormat & format( SyntaxFormat type ) const
{
return mFormats[type];
}
inline static const SyntaxHighlighterGlobals * instance() { Q_ASSERT(mInstance); return mInstance; }
public Q_SLOTS:
void applySettings( Settings::Manager * );
Q_SIGNALS:
void syntaxFormatsChanged();
private:
friend class SyntaxHighlighter;
void initSyntaxRules();
void initKeywords();
void initBuiltins();
void applySettings( Settings::Manager*, const QString &key, SyntaxFormat );
QTextCharFormat mFormats[FormatCount];
QVector<SyntaxRule> mInCodeRules;
QRegExp mInSymbolRegexp, mInStringRegexp;
static SyntaxHighlighterGlobals *mInstance;
};
class SyntaxHighlighter:
public QSyntaxHighlighter
{
Q_OBJECT
static const int inCode = 0;
static const int inString = 1;
static const int inSymbol = 2;
static const int inComment = 100;
// NOTE: Integers higher than inComment are reserved for multi line comments,
// and indicate the comment nesting level!
public:
SyntaxHighlighter(QTextDocument *parent = 0);
private:
void highlightBlock(const QString &text);
void highlightBlockInCode(const QString& text, int & currentIndex, int & currentState);
void highlightBlockInString(const QString& text, int & currentIndex, int & currentState);
void highlightBlockInSymbol(const QString& text, int & currentIndex, int & currentState);
void highlightBlockInComment(const QString& text, int & currentIndex, int & currentState);
Token::Type findMatchingRule(QString const & text, int & currentIndex, int & lengthOfMatch);
const SyntaxHighlighterGlobals *mGlobals;
};
}
#endif<|fim▁end|> | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/ |
<|file_name|>process.rs<|end_file_name|><|fim▁begin|>#![allow(dead_code, unused_variables, unused_imports)]
use {Async, Stream, Sender, AsyncResult, AsyncError};
use syncbox::ArrayQueue;
use std::{mem, ops};
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
const MAX_IN_FLIGHT: usize = (1 << 16) - 1;
pub fn process<T, U, F>(source: Stream<T, U::Error>, in_flight: usize, action: F) -> Stream<U::Value, U::Error>
where T: Send + 'static,
U: Async,
F: FnMut(T) -> U + Send + 'static {
// New stream
let (tx, rx) = Stream::pair();
// Wait for consumer interest
if in_flight > 0 {
tx.receive(move |res| {
if let Ok(tx) = res {
setup(source, in_flight, action, tx);
}
});
}
rx
}
fn setup<T, U, F>(source: Stream<T, U::Error>, in_flight: usize, action: F, dest: Sender<U::Value, U::Error>)
where T: Send + 'static,
U: Async,
F: FnMut(T) -> U + Send + 'static {
let mut inner = Inner::new(source, in_flight, action, dest);
inner.maybe_process_next(false);
}
struct Core<T: Send + 'static, U: Async, F> {
max: usize,
queue: ArrayQueue<AsyncResult<U::Value, U::Error>>,
sender: Option<Sender<U::Value, U::Error>>,
source: Option<Source<T, U, F>>,
consume_state: AtomicState,
produce_state: AtomicState,
}
struct Source<T: Send + 'static, U: Async, F> {
stream: Stream<T, U::Error>,
action: F,
}
struct Inner<T: Send + 'static, U: Async, F>(Arc<UnsafeCell<Core<T, U, F>>>);
impl<T: Send + 'static, U: Async, F: FnMut(T) -> U + Send + 'static> Inner<T, U, F> {
fn new(source: Stream<T, U::Error>,
in_flight: usize,
action: F,
dest: Sender<U::Value, U::Error>) -> Inner<T, U, F> {
let core = Core {
max: in_flight,
queue: ArrayQueue::with_capacity(in_flight),
source: Some(Source { stream: source, action: action }),
sender: Some(dest),
consume_state: AtomicState::new(),
produce_state: AtomicState::new(),
};
Inner(Arc::new(UnsafeCell::new(core)))
}
fn maybe_process_next(&mut self, dec_in_flight: bool) {
// Attempt to acquire the consume lock and increment the number of
// in-flight queries. An acquire ordering is used to ensure that the
// source value is readable.
if self.try_acquire_consume_lock(dec_in_flight, Ordering::Acquire) {
// Access to the source has been acquired
let Source { stream, mut action } = self.source.take().expect("source should be present");
let mut inner = self.clone();
// Wait for the next value to be provided
stream.receive(move |res| {
match res {
Ok(Some((val, rest))) => {
// Process the value and wait for the result
let val = action(val);
// Return the source. Release ordering is used to flush
// the memory so that another thread may access it.
inner.source = Some(Source { stream: rest, action: action });
inner.consume_state.release_lock(Ordering::Release);
let mut inner2 = inner.clone();
// Wait for the result
val.receive(move |res| {
match res {
Ok(val) => {
inner2.receive_value(Ok(val));
}
Err(err) => {
inner2.receive_value(Err(err));
}
}
});
// Since the results are buffered, attempt to read
// another value immediately
inner.maybe_process_next(false);
}
Ok(None) => {}
Err(AsyncError::Failed(_)) => {
unimplemented!();
}
_ => unimplemented!(),
}
});<|fim▁hole|> // Push the value onto the queue
self.queue.push(val).ok()
.expect("value queue should never run out of capacity");
// Increment the number of values pending in the queue
//
// TODO: if the stream has failed, discard the value by popping from
// the queue
if self.acquire_produce_lock_or_inc_in_flight(Ordering::Acquire) {
// Produce lock has been acquired
self.send_value();
}
}
fn send_value(&mut self) {
let sender = self.sender.take().expect("expected sender to be sender");
let value = self.queue.pop().expect("expected value to be in queue");
match value {
Ok(value) => {
let mut inner = self.clone();
sender.send(value).receive(move |res| {
match res {
Ok(sender) => {
// Save off the sender in case the lock is released
inner.sender = Some(sender);
if inner.release_lock_if_idle(Ordering::Release) {
// in-flight has been decremented, also, even though
// the previous memory fence is a release, the only
// memory that is accessed has been set previously in
// the current thread
inner.send_value();
}
}
Err(_) => {
// TODO: Transition to a canceled state and discard
// currently pending values
}
}
});
}
Err(AsyncError::Failed(e)) => sender.fail(e),
Err(AsyncError::Aborted) => sender.abort(),
}
// Will decrement the consumer in-flight and potentially start
// processing another value
self.maybe_process_next(true);
}
fn try_acquire_consume_lock(&self, dec_in_flight: bool, order: Ordering) -> bool {
let mut old = self.consume_state.load(Ordering::Relaxed);
loop {
// If the consume lock has already been acquired and in-flight is
// not being decremented, then the state does not need to
// transition. Nothing else to do, the lock has not been acquired.
if (old.has_lock() || old.in_flight() == self.max) && !dec_in_flight {
return false;
}
let new = if old.has_lock() {
debug_assert!(dec_in_flight, "state transition bug");
// The lock coul dnot be acquired, but the num in-flight must
// be decremented
old.dec_in_flight()
} else {
debug_assert!(old.in_flight() < self.max || dec_in_flight, "state transition bug");
if dec_in_flight {
old.with_lock()
} else {
old.inc_in_flight().with_lock()
}
};
let act = self.consume_state.compare_and_swap(old, new, order);
if act == old {
return !old.has_lock();
}
old = act;
}
}
fn try_acquire_produce_lock(&self, order: Ordering) -> bool {
let mut old = self.produce_state.load(order);
loop {
if old.has_lock() {
return false;
}
let act = self.produce_state.compare_and_swap(old, old.with_lock(), order);
if act == old {
return true;
}
old = act
}
}
// Increment the in-flight counter and attempt to acquire the produce lock
fn acquire_produce_lock_or_inc_in_flight(&self, order: Ordering) -> bool {
let mut old = self.produce_state.load(Ordering::Relaxed);
loop {
let new = if old.has_lock() {
old.inc_in_flight()
} else {
old.with_lock()
};
let act = self.produce_state.compare_and_swap(old, new, order);
if act == old {
return !old.has_lock();
}
old = act;
}
}
fn release_lock_if_idle(&self, order: Ordering) -> bool {
let mut old = self.produce_state.load(Ordering::Relaxed);
loop {
let new = if old.in_flight() > 0 {
old.dec_in_flight()
} else {
old.without_lock()
};
let act = self.produce_state.compare_and_swap(old, new, order);
if act == old {
return new.has_lock();
}
old = act;
}
}
}
impl<T: Send + 'static, U: Async, F> ops::Deref for Inner<T, U, F> {
type Target = Core<T, U, F>;
fn deref(&self) -> &Core<T, U, F> {
unsafe { mem::transmute(self.0.get()) }
}
}
impl<T: Send + 'static, U: Async, F> ops::DerefMut for Inner<T, U, F> {
fn deref_mut(&mut self) -> &mut Core<T, U, F> {
unsafe { mem::transmute(self.0.get()) }
}
}
impl<T: Send + 'static, U: Async, F> Clone for Inner<T, U, F> {
fn clone(&self) -> Inner<T, U, F> {
Inner(self.0.clone())
}
}
unsafe impl<T: Send, U: Async, F> Send for Inner<T, U, F> { }
unsafe impl<T: Send, U: Async, F> Sync for Inner<T, U, F> { }
const LOCK: usize = 1 << 31;
#[derive(Copy, Clone, Eq, PartialEq)]
struct State(usize);
impl State {
fn in_flight(&self) -> usize {
self.0 & MAX_IN_FLIGHT
}
fn inc_in_flight(&self) -> State {
assert!(self.in_flight() < MAX_IN_FLIGHT);
State(self.0 + 1)
}
fn dec_in_flight(&self) -> State {
assert!(self.in_flight() > 0);
State(self.0 - 1)
}
fn has_lock(&self) -> bool {
self.0 & LOCK == LOCK
}
fn with_lock(&self) -> State {
State(self.0 | LOCK)
}
fn without_lock(&self) -> State {
State(!LOCK & self.0)
}
}
struct AtomicState {
atomic: AtomicUsize,
}
impl AtomicState {
fn new() -> AtomicState {
AtomicState { atomic: AtomicUsize::new(0) }
}
fn load(&self, order: Ordering) -> State {
let val = self.atomic.load(order);
State(val)
}
fn compare_and_swap(&self, old: State, new: State, order: Ordering) -> State {
let val = self.atomic.compare_and_swap(old.0, new.0, order);
State(val)
}
fn release_lock(&self, order: Ordering) {
self.atomic.fetch_sub(LOCK, order);
}
}<|fim▁end|> | }
}
fn receive_value(&mut self, val: AsyncResult<U::Value, U::Error>) { |
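// A hedged, illustrative reading of the bit-packed state above, using the
// constants defined in this file:
//
//   let s = State(0).with_lock().inc_in_flight();
//   assert!(s.has_lock());          // LOCK bit (1 << 31) set
//   assert_eq!(s.in_flight(), 1);   // low bits count in-flight values
//   assert_eq!(s.without_lock().0, 1);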
<|file_name|>qtip.js<|end_file_name|><|fim▁begin|>jQuery(function($) {
$('ul li.active').qtip({
content: 'This is an active list element',
show: 'mouseover',
hide: 'mouseout',
position: { target: 'mouse' } <|fim▁hole|>});<|fim▁end|> | }) |
<|file_name|>test_constructors.py<|end_file_name|><|fim▁begin|>import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
([pd.Period("2017", "D"), None], None, [17167, iNaT]),
(pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
(pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
(pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
],
)
def test_period_array_ok(data, freq, expected):
result = period_array(data, freq=freq).asi8
expected = np.asarray(expected, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
# https://github.com/pandas-dev/pandas/issues/25403
pa = period_array([pd.Period("2019-01-01")])
arr = np.asarray(pa, dtype="object")
arr.setflags(write=False)
result = period_array(arr)
tm.assert_period_array_equal(result, pa)
result = pd.Series(arr)
tm.assert_series_equal(result, pd.Series(pa))
result = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
<|fim▁hole|> # https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
result = PeriodArray._from_datetime64(arr, freq="M")
expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
"data, freq, msg",
[
(
[pd.Period("2017", "D"), pd.Period("2017", "A")],
None,
"Input has different freq",
),
([pd.Period("2017", "D")], "A", "Input has different freq"),
],
)
def test_period_array_raises(data, freq, msg):
with pytest.raises(IncompatibleFrequency, match=msg):
period_array(data, freq)
def test_period_array_non_period_series_raies():
ser = pd.Series([1, 2, 3])
with pytest.raises(TypeError, match="dtype"):
PeriodArray(ser, freq="D")
def test_period_array_freq_mismatch():
arr = period_array(["2000", "2001"], freq="D")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq="M")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq=pd.tseries.offsets.MonthEnd())
def test_from_sequence_disallows_i8():
arr = period_array(["2000", "2001"], freq="D")
msg = str(arr[0].ordinal)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)<|fim▁end|> |
def test_from_datetime64_freq_changes(): |
<|file_name|>batch.go<|end_file_name|><|fim▁begin|>package libldbrest
import (
"errors"
"github.com/jmhodges/levigo"
)
type oplist []*struct {
Op, Key, Value string
}
var errBadBatch = errors.New("bad write batch")
func applyBatch(ops oplist) error {
wb := levigo.NewWriteBatch()
defer wb.Close()
for _, op := range ops {
switch op.Op {<|fim▁hole|> wb.Delete([]byte(op.Key))
default:
return errBadBatch
}
}
return db.Write(wo, wb)
}<|fim▁end|> | case "put":
wb.Put([]byte(op.Key), []byte(op.Value))
case "delete": |
<|file_name|>test_client.py<|end_file_name|><|fim▁begin|>import soundcloud
from soundcloud.tests.utils import MockResponse
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from nose.tools import eq_, raises
from fudge import patch
def test_kwargs_parsing_valid():
"""Test that valid kwargs are stored as properties on the client."""
client = soundcloud.Client(client_id='foo', client_secret='foo')
assert isinstance(client, soundcloud.Client)
eq_('foo', client.client_id)
client = soundcloud.Client(client_id='foo', client_secret='bar',
access_token='baz', username='you',
password='secret', redirect_uri='foooo')
eq_('foo', client.client_id)
eq_('baz', client.access_token)
@raises(AttributeError)
def test_kwargs_parsing_invalid():
"""Test that unknown kwargs are ignored."""
client = soundcloud.Client(foo='bar', client_id='bar')
client.foo
def test_url_creation():
"""Test that resources are turned into urls properly."""
client = soundcloud.Client(client_id='foo')
url = client._resolve_resource_name('tracks')
eq_('https://api.soundcloud.com/tracks', url)
url = client._resolve_resource_name('/tracks/')
eq_('https://api.soundcloud.com/tracks', url)
def test_url_creation_options():
"""Test that resource resolving works with different options."""
client = soundcloud.Client(client_id='foo', use_ssl=False)
client.host = 'soundcloud.dev'
url = client._resolve_resource_name('apps/132445')
eq_('http://soundcloud.dev/apps/132445', url)
def test_method_dispatching():
"""Test that getattr is doing right by us."""
client = soundcloud.Client(client_id='foo')
for method in ('get', 'post', 'put', 'delete', 'head'):
p = getattr(client, method)
eq_((method,), p.args)
eq_('_request', p.func.__name__)
def test_host_config():
"""We should be able to set the host on the client."""
client = soundcloud.Client(client_id='foo', host='api.soundcloud.dev')
eq_('api.soundcloud.dev', client.host)
client = soundcloud.Client(client_id='foo')
eq_('api.soundcloud.com', client.host)
@patch('requests.get')
def test_disabling_ssl_verification(fake_get):
"""We should be able to disable ssl verification when we are in dev mode"""
client = soundcloud.Client(client_id='foo', host='api.soundcloud.dev',
verify_ssl=False)
expected_url = '%s?%s' % (
client._resolve_resource_name('tracks'),
urlencode({
'limit': 5,
'client_id': 'foo'
}))
headers = {
'User-Agent': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_get.expects_call()
.with_args(expected_url,
headers=headers,
verify=False,
allow_redirects=True)
.returns(MockResponse("{}")))
client.get('tracks', limit=5)
@raises(AttributeError)
def test_method_dispatching_invalid_method():
"""Test that getattr raises an attributeerror if we give it garbage."""
client = soundcloud.Client(client_id='foo')
client.foo()
@patch('requests.get')
def test_method_dispatching_get_request_readonly(fake_get):
"""Test that calling client.get() results in a proper call
to the get function in the requests module with the provided
kwargs as the querystring.
"""
client = soundcloud.Client(client_id='foo')
expected_url = '%s?%s' % (
client._resolve_resource_name('tracks'),
urlencode({
'limit': 5,
'client_id': 'foo'
}))
headers = {
'User-Agent': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_get.expects_call()
.with_args(expected_url, headers=headers, allow_redirects=True)
.returns(MockResponse("{}")))
client.get('tracks', limit=5)
@patch('requests.post')
def test_method_dispatching_post_request(fake_post):
"""Test that calling client.post() results in a proper call
to the post function in the requests module.
TODO: Revise once read/write support has been added.
"""
<|fim▁hole|> expected_url = client._resolve_resource_name('tracks')
data = {
'client_id': 'foo'
}
headers = {
'User-Agent': soundcloud.USER_AGENT
}
(fake_post.expects_call()
.with_args(expected_url,
data=data,
headers=headers,
allow_redirects=True)
.returns(MockResponse("{}")))
client.post('tracks')
@patch('requests.get')
def test_proxy_servers(fake_request):
"""Test that providing a dictionary of proxy servers works."""
proxies = {
'http': 'myproxyserver:1234'
}
client = soundcloud.Client(client_id='foo', proxies=proxies)
expected_url = "%s?%s" % (
client._resolve_resource_name('me'),
urlencode({
'client_id': 'foo'
})
)
headers = {
'User-Agent': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_request.expects_call()
.with_args(expected_url,
headers=headers,
proxies=proxies,
allow_redirects=True)
.returns(MockResponse("{}")))
client.get('/me')<|fim▁end|> | client = soundcloud.Client(client_id='foo')
|
<|file_name|>change_request.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
from openerp.osv import osv, fields
class LeadToChangeRequestWizard(osv.TransientModel):
"""
wizard to convert a Lead into a Change Request and move the Mail Thread
"""
_name = "crm.lead2cr.wizard"
_inherit = 'crm.partner.binding'
_columns = {
"lead_id": fields.many2one(
"crm.lead", "Lead", domain=[("type", "=", "lead")]
),
# "project_id": fields.many2one("project.project", "Project"),
"change_category_id": fields.many2one(
"change.management.category", "Change Category"
),
}
_defaults = {
"lead_id": lambda self, cr, uid, context=None: context.get('active_id')
}
def action_lead_to_change_request(self, cr, uid, ids, context=None):
# get the wizards and models
wizards = self.browse(cr, uid, ids, context=context)
lead_obj = self.pool["crm.lead"]
cr_obj = self.pool["change.management.change"]
attachment_obj = self.pool['ir.attachment']
for wizard in wizards:
# get the lead to transform
lead = wizard.lead_id
partner = self._find_matching_partner(cr, uid, context=context)
if not partner and (lead.partner_name or lead.contact_name):
partner_ids = lead_obj.handle_partner_assignation(
cr, uid, [lead.id], context=context
)
partner = partner_ids[lead.id]
# create new change request
vals = {
"description": lead.name,
"description_event": lead.description,
"email_from": lead.email_from,
"project_id": lead.project_id.id,
"stakeholder_id": partner,
"author_id": uid,
"change_category_id": wizard.change_category_id.id,
}
change_id = cr_obj.create(cr, uid, vals, context=None)
change = cr_obj.browse(cr, uid, change_id, context=None)
# move the mail thread
lead_obj.message_change_thread(
cr, uid, lead.id, change_id,
"change.management.change", context=context
)
# Move attachments
attachment_ids = attachment_obj.search(
cr, uid,
[('res_model', '=', 'crm.lead'), ('res_id', '=', lead.id)],
context=context
)
attachment_obj.write(
cr, uid, attachment_ids,
{'res_model': 'change.management.change', 'res_id': change_id},
context=context
)
# Archive the lead
lead_obj.write(
cr, uid, [lead.id], {'active': False}, context=context)
# delete the lead
# lead_obj.unlink(cr, uid, [lead.id], context=None)
# return the action to go to the form view of the new CR
view_id = self.pool.get('ir.ui.view').search(
cr, uid,
[
('model', '=', 'change.management.change'),
('name', '=', 'change_form_view')<|fim▁hole|> )
return {
'name': 'CR created',
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'res_model': 'change.management.change',
'type': 'ir.actions.act_window',
'res_id': change_id,
'context': context
}<|fim▁end|> | ] |
<|file_name|>_runApp_Development_nodebug.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
from mcm.Bluebox import app
from mcm.Bluebox import configuration
# socketio.run(
# app,
app.run(
host=configuration.my_bind_host,
port=int(configuration.my_endpoint_port),
debug=False,
threaded=True<|fim▁hole|>)<|fim▁end|> | |
<|file_name|>dict.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
uds.utils.dict
~~~~~~~~~~~~~~
Utility functions for working with dicts.
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology. All rights reserved.
:license: GPL2, see LICENSE for more details.
"""
import copy
def override_dict(new, old):
"""Override old dict object with new one.
<|fim▁hole|> :return: Overridden result
:rtype: :attr:`object`
"""
if isinstance(new, dict):
merged = copy.deepcopy(old)
for key in new.keys():
if key in old:
merged[key] = override_dict(new[key], old[key])
else:
merged[key] = new[key]
return merged
else:
return new<|fim▁end|> | :param object new: New dict
    :param object old: Old dict
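# Illustrative merge, mirroring the recursion above (a sketch, not a test):
#   override_dict({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'y': 2, 'x': 1}, 'b': 3}
# Non-dict values in `new` simply replace whatever `old` held.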
<|file_name|>preview.js<|end_file_name|><|fim▁begin|>function loadTxt()
{<|fim▁hole|> }
function writeTitle()
{
document.write("<title>Anteprima</title>")
}<|fim▁end|> | document.getElementById("btnClose").value = "chiudi"; |
<|file_name|>imagelike.py<|end_file_name|><|fim▁begin|># Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.display
from renpy.display.render import render, Render, Matrix2D
# This file contains displayables that are image-like, because they take
# up a rectangular area of the screen, and do not respond to input.
class Solid(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
A displayable that fills the area its assigned with `color`.
::
image white = Solid("#fff")
"""
def __init__(self, color, **properties):
super(Solid, self).__init__(**properties)
if color is not None:
self.color = renpy.easy.color(color)
else:
self.color = None
def visit(self):
return [ ]
def render(self, width, height, st, at):
color = self.color or self.style.color
rv = Render(width, height)
if color is None or width <= 0 or height <= 0:
return rv
SIZE = 10
if width < SIZE or height < SIZE:
tex = renpy.display.draw.solid_texture(width, height, color)
else:
tex = renpy.display.draw.solid_texture(SIZE, SIZE, color)
rv.forward = Matrix2D(1.0 * SIZE / width, 0, 0, 1.0 * SIZE / height)
rv.reverse = Matrix2D(1.0 * width / SIZE, 0, 0, 1.0 * height / SIZE)
rv.blit(tex, (0, 0))
return rv
class Frame(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
:args: (image, xborder, yborder, tile=False, **properties)
A displayable that resizes an image to fill the available area,
while preserving the width and height of its borders. is often
used as the background of a window or button.
.. figure:: frame_example.png
Using a frame to resize an image to double its size.
`image`
An image manipulator that will be resized by this frame.
`left`
The size of the border on the left side.
`top`
The size of the border on the top.
`right`
The size of the border on the right side. If None, defaults
to `left`.
`bottom`
The side of the border on the bottom. If None, defaults to `top`.
`tile`
If true, tiling is used to resize sections of the image,
rather than scaling.
::
# Resize the background of the text window if it's too small.
init python:
style.window.background = Frame("frame.png", 10, 10)
"""
__version__ = 1
def after_upgrade(self, version):
if version < 2:
self.left = self.xborder
self.right = self.xborder
self.top = self.yborder
self.bottom = self.yborder
def __init__(self, image, left, top, right=None, bottom=None, bilinear=True, tile=False, **properties):
super(Frame, self).__init__(**properties)
self.image = renpy.easy.displayable(image)
self.tile = tile
if right is None:
right = left
if bottom is None:
bottom = top
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def render(self, width, height, st, at):
crend = render(self.image, width, height, st, at)
sw, sh = crend.get_size()
sw = int(sw)
sh = int(sh)
dw = int(width)
dh = int(height)
bw = self.left + self.right
bh = self.top + self.bottom
xborder = min(bw, sw - 2, dw)
if xborder:
left = self.left * xborder / bw
right = self.right * xborder / bw
else:
left = 0
right = 0
yborder = min(bh, sh - 2, dh)
if yborder:
top = self.top * yborder / bh
bottom = self.bottom * yborder / bh
else:
top = 0
bottom = 0
if renpy.display.draw.info["renderer"] == "sw":
return self.sw_render(crend, dw, dh, left, top, right, bottom)
def draw(x0, x1, y0, y1):
# Compute the coordinates of the left, right, top, and
# bottom sides of the region, for both the source and
# destination surfaces.
# left side.
if x0 >= 0:
dx0 = x0
sx0 = x0
else:
dx0 = dw + x0
sx0 = sw + x0
# right side.
if x1 > 0:
dx1 = x1
sx1 = x1
else:
dx1 = dw + x1
sx1 = sw + x1
# top side.
if y0 >= 0:
dy0 = y0
sy0 = y0
else:
dy0 = dh + y0
sy0 = sh + y0
# bottom side
if y1 > 0:
dy1 = y1
sy1 = y1
else:
dy1 = dh + y1
sy1 = sh + y1
# Quick exit.
if sx0 == sx1 or sy0 == sy1:
return
# Compute sizes.
csw = sx1 - sx0
csh = sy1 - sy0
cdw = dx1 - dx0
cdh = dy1 - dy0
if csw <= 0 or csh <= 0 or cdh <= 0 or cdw <= 0:
return
# Get a subsurface.
cr = crend.subsurface((sx0, sy0, csw, csh))
# Scale or tile if we have to.
if csw != cdw or csh != cdh:
if self.tile:
newcr = Render(cdw, cdh)
newcr.clipping = True
for x in xrange(0, cdw, csw):
for y in xrange(0, cdh, csh):
newcr.blit(cr, (x, y))
cr = newcr
else:
newcr = Render(cdw, cdh)
newcr.forward = Matrix2D(1.0 * csw / cdw, 0, 0, 1.0 * csh / cdh)<|fim▁hole|> cr = newcr
# Blit.
rv.blit(cr, (dx0, dy0))
return
rv = Render(dw, dh)
self.draw_pattern(draw, left, top, right, bottom)
return rv
def draw_pattern(self, draw, left, top, right, bottom):
# Top row.
if top:
if left:
draw(0, left, 0, top)
draw(left, -right, 0, top)
if right:
draw(-right, 0, 0, top)
# Middle row.
if left:
draw(0, left, top, -bottom)
draw(left, -right, top, -bottom)
if right:
draw(-right, 0, top, -bottom)
# Bottom row.
if bottom:
if left:
draw(0, left, -bottom, 0)
draw(left, -right, -bottom, 0)
if right:
draw(-right, 0, -bottom, 0)
def sw_render(self, crend, dw, dh, left, top, right, bottom):
source = crend.render_to_texture(True)
sw, sh = source.get_size()
dest = renpy.display.swdraw.surface(dw, dh, True)
rv = dest
def draw(x0, x1, y0, y1):
# Compute the coordinates of the left, right, top, and
# bottom sides of the region, for both the source and
# destination surfaces. Non-negative values measure from the
# left/top edge; negative values measure from the right/bottom edge.
# left side.
if x0 >= 0:
dx0 = x0
sx0 = x0
else:
dx0 = dw + x0
sx0 = sw + x0
# right side.
if x1 > 0:
dx1 = x1
sx1 = x1
else:
dx1 = dw + x1
sx1 = sw + x1
# top side.
if y0 >= 0:
dy0 = y0
sy0 = y0
else:
dy0 = dh + y0
sy0 = sh + y0
# bottom side
if y1 > 0:
dy1 = y1
sy1 = y1
else:
dy1 = dh + y1
sy1 = sh + y1
# Quick exit.
if sx0 == sx1 or sy0 == sy1 or dx1 <= dx0 or dy1 <= dy0:
return
# Compute sizes.
srcsize = (sx1 - sx0, sy1 - sy0)
dstsize = (int(dx1 - dx0), int(dy1 - dy0))
# Get a subsurface.
surf = source.subsurface((sx0, sy0, srcsize[0], srcsize[1]))
# Scale or tile if we have to.
if dstsize != srcsize:
if self.tile:
tilew, tileh = srcsize
dstw, dsth = dstsize
surf2 = renpy.display.pgrender.surface_unscaled(dstsize, surf)
for y in range(0, dsth, tileh):
for x in range(0, dstw, tilew):
surf2.blit(surf, (x, y))
surf = surf2
else:
surf2 = renpy.display.scale.real_transform_scale(surf, dstsize)
surf = surf2
# Blit.
dest.blit(surf, (dx0, dy0))
self.draw_pattern(draw, left, top, right, bottom)
rrv = renpy.display.render.Render(dw, dh)
rrv.blit(rv, (0, 0))
rrv.depends_on(crend)
# And, finish up.
return rrv
def visit(self):
return [ self.image ]<|fim▁end|> | newcr.reverse = Matrix2D(1.0 * cdw / csw, 0, 0, 1.0 * cdh / csh)
newcr.blit(cr, (0, 0))
|
<|file_name|>unread.js<|end_file_name|><|fim▁begin|>'use strict';
var async = require('async');
var nconf = require('nconf');
var querystring = require('querystring');
var meta = require('../meta');
var pagination = require('../pagination');
var user = require('../user');
var topics = require('../topics');
var plugins = require('../plugins');
var helpers = require('./helpers');
var unreadController = module.exports;
unreadController.get = function (req, res, next) {
var page = parseInt(req.query.page, 10) || 1;
var results;
var cid = req.query.cid;
var filter = req.query.filter || '';
var settings;
async.waterfall([
function (next) {
plugins.fireHook('filter:unread.getValidFilters', { filters: Object.assign({}, helpers.validFilters) }, next);
},
function (data, _next) {
if (!data.filters[filter]) {
return next();
}
async.parallel({
watchedCategories: function (next) {
helpers.getWatchedCategories(req.uid, cid, next);
},
settings: function (next) {
user.getSettings(req.uid, next);
},
}, _next);
},
function (_results, next) {
results = _results;
settings = results.settings;
var start = Math.max(0, (page - 1) * settings.topicsPerPage);
var stop = start + settings.topicsPerPage - 1;
var cutoff = req.session.unreadCutoff ? req.session.unreadCutoff : topics.unreadCutoff();
topics.getUnreadTopics({
cid: cid,
uid: req.uid,
start: start,
stop: stop,
filter: filter,
cutoff: cutoff,
}, next);
},
function (data, next) {
user.blocks.filter(req.uid, data.topics, function (err, filtered) {
data.topics = filtered;
next(err, data);
});
},
function (data) {
data.title = meta.config.homePageTitle || '[[pages:home]]';
data.pageCount = Math.max(1, Math.ceil(data.topicCount / settings.topicsPerPage));
data.pagination = pagination.create(page, data.pageCount, req.query);
if (settings.usePagination && (page < 1 || page > data.pageCount)) {
req.query.page = Math.max(1, Math.min(data.pageCount, page));
return helpers.redirect(res, '/unread?' + querystring.stringify(req.query));
}
data.categories = results.watchedCategories.categories;<|fim▁hole|> data.title = '[[pages:unread]]';
data.breadcrumbs = helpers.buildBreadcrumbs([{ text: '[[unread:title]]' }]);
}
data.filters = helpers.buildFilters('unread', filter, req.query);
data.selectedFilter = data.filters.find(function (filter) {
return filter && filter.selected;
});
res.render('unread', data);
},
], next);
};
unreadController.unreadTotal = function (req, res, next) {
var filter = req.query.filter || '';
async.waterfall([
function (next) {
plugins.fireHook('filter:unread.getValidFilters', { filters: Object.assign({}, helpers.validFilters) }, next);
},
function (data, _next) {
if (!data.filters[filter]) {
return next();
}
topics.getTotalUnread(req.uid, filter, _next);
},
function (data) {
res.json(data);
},
], next);
};<|fim▁end|> | data.allCategoriesUrl = 'unread' + helpers.buildQueryString('', filter, '');
data.selectedCategory = results.watchedCategories.selectedCategory;
data.selectedCids = results.watchedCategories.selectedCids;
if (req.originalUrl.startsWith(nconf.get('relative_path') + '/api/unread') || req.originalUrl.startsWith(nconf.get('relative_path') + '/unread')) { |
<|file_name|>typing_status.js<|end_file_name|><|fim▁begin|>var typing_status = require('js/typing_status');
function return_false() { return false; }
function return_true() { return true; }
function return_alice() { return "alice"; }
function return_bob() { return "bob"; }
function make_time(secs) {
// make times semi-realistic
return 1000000 + 1000 * secs;
}
function returns_time(secs) {
return function () { return make_time(secs); };
}
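// For illustration: make_time(5) === 1005000, so the fake clock used by
// these tests advances in milliseconds from a fixed epoch, and
// returns_time(5)() yields that same value on every call.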
(function test_basics() {
// invalid conversation basically does nothing
var worker = {
get_recipient: return_alice,
is_valid_conversation: return_false,
};
typing_status.handle_text_input(worker);
// Start setting up more testing state.
typing_status.initialize_state();
var events = {};
function set_timeout(f, delay) {
assert.equal(delay, 5000);
events.idle_callback = f;
return 'idle_timer_stub';
}
function clear_timeout() {
events.timer_cleared = true;
}
global.patch_builtin('setTimeout', set_timeout);
global.patch_builtin('clearTimeout', clear_timeout);
function notify_server_start(recipient) {
assert.equal(recipient, "alice");
events.started = true;
}
function notify_server_stop(recipient) {
assert.equal(recipient, "alice");
events.stopped = true;
}
function clear_events() {
events.idle_callback = undefined;
events.started = false;
events.stopped = false;
events.timer_cleared = false;
}
function call_handler() {
clear_events();
typing_status.handle_text_input(worker);
}
function call_stop() {
clear_events();
typing_status.stop(worker);
}
worker = {
get_recipient: return_alice,
is_valid_conversation: return_true,
get_current_time: returns_time(5),
notify_server_start: notify_server_start,
notify_server_stop: notify_server_stop,
};
// Start talking to alice.
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(5 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: false,
timer_cleared: false,
});
assert(events.idle_callback);
// type again 3 seconds later
worker.get_current_time = returns_time(8);
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(5 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: false,
stopped: false,
timer_cleared: true,
});
assert(events.idle_callback);
// Type again after the 15-second mark (next_send_start_time has
// passed), so we can notify the server again.
worker.get_current_time = returns_time(18);
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(18 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: false,
timer_cleared: true,
});
// Now call alice's idle callback that we captured earlier.
var callback = events.idle_callback;
clear_events();
callback();
assert.deepEqual(typing_status.state, {
next_send_start_time: undefined,
idle_timer: undefined,
current_recipient: undefined,
});
assert.deepEqual(events, {
idle_callback: undefined,
started: false,
stopped: true,
timer_cleared: true,
});
// Call stop with nothing going on.
call_stop();
assert.deepEqual(typing_status.state, {<|fim▁hole|> idle_timer: undefined,
current_recipient: undefined,
});
assert.deepEqual(events, {
idle_callback: undefined,
started: false,
stopped: false,
timer_cleared: false,
});
// Start talking to alice again.
worker.get_current_time = returns_time(50);
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(50 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: false,
timer_cleared: false,
});
assert(events.idle_callback);
// Explicitly stop alice.
call_stop();
assert.deepEqual(typing_status.state, {
next_send_start_time: undefined,
idle_timer: undefined,
current_recipient: undefined,
});
assert.deepEqual(events, {
idle_callback: undefined,
started: false,
stopped: true,
timer_cleared: true,
});
// Start talking to alice again.
worker.get_current_time = returns_time(80);
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(80 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: false,
timer_cleared: false,
});
assert(events.idle_callback);
// Switch to an invalid conversation.
worker.get_recipient = function () {
return 'not-alice';
};
worker.is_valid_conversation = return_false;
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: undefined,
idle_timer: undefined,
current_recipient: undefined,
});
assert.deepEqual(events, {
idle_callback: undefined,
started: false,
stopped: true,
timer_cleared: true,
});
// Switch to another invalid conversation.
worker.get_recipient = function () {
return 'another-bogus-one';
};
worker.is_valid_conversation = return_false;
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: undefined,
idle_timer: undefined,
current_recipient: undefined,
});
assert.deepEqual(events, {
idle_callback: undefined,
started: false,
stopped: false,
timer_cleared: false,
});
// Start talking to alice again.
worker.get_recipient = return_alice;
worker.is_valid_conversation = return_true;
worker.get_current_time = returns_time(170);
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(170 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'alice',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: false,
timer_cleared: false,
});
assert(events.idle_callback);
// Switch to bob now.
worker.get_recipient = return_bob;
worker.is_valid_conversation = return_true;
worker.get_current_time = returns_time(171);
worker.notify_server_start = function (recipient) {
assert.equal(recipient, "bob");
events.started = true;
};
call_handler();
assert.deepEqual(typing_status.state, {
next_send_start_time: make_time(171 + 10),
idle_timer: 'idle_timer_stub',
current_recipient: 'bob',
});
assert.deepEqual(events, {
idle_callback: events.idle_callback,
started: true,
stopped: true,
timer_cleared: true,
});
assert(events.idle_callback);
}());<|fim▁end|> | next_send_start_time: undefined, |
<|file_name|>SeedPeersTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2011 Micheal Swiggs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.reddcoin.net.discovery;
import com.google.reddcoin.params.MainNetParams;
import org.junit.Test;
import java.net.InetSocketAddress;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
public class SeedPeersTest {
@Test
public void getPeer_one() throws Exception{
SeedPeers seedPeers = new SeedPeers(MainNetParams.get());
assertThat(seedPeers.getPeer(), notNullValue());
}
@Test
public void getPeer_all() throws Exception{
SeedPeers seedPeers = new SeedPeers(MainNetParams.get());
for(int i = 0; i < SeedPeers.seedAddrs.length; ++i){
assertThat("Failed on index: "+i, seedPeers.getPeer(), notNullValue());
}
assertThat(seedPeers.getPeer(), equalTo(null));
}
@Test
public void getPeers_length() throws Exception{
SeedPeers seedPeers = new SeedPeers(MainNetParams.get());<|fim▁hole|> assertThat(addresses.length, equalTo(SeedPeers.seedAddrs.length));
}
}<|fim▁end|> | InetSocketAddress[] addresses = seedPeers.getPeers(0, TimeUnit.SECONDS); |
<|file_name|>mapnik_datasource_cache.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2006 Artem Pavlenko, Jean-Francois Doyon
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
#include <boost/python.hpp>
#include <mapnik/datasource_cache.hpp>
namespace {
using namespace boost::python;
boost::shared_ptr<mapnik::datasource> create_datasource(const dict& d)
{
bool bind=true;
mapnik::parameters params;
boost::python::list keys=d.keys();
for (int i=0; i<len(keys); ++i)
{
std::string key = extract<std::string>(keys[i]);
object obj = d[key];
if (key == "bind")
{
bind = extract<bool>(obj)();
continue;
}
extract<std::string> ex0(obj);
extract<int> ex1(obj);
extract<double> ex2(obj);
if (ex0.check())
{
params[key] = ex0();
}
else if (ex1.check())
{
params[key] = ex1();
}
else if (ex2.check())
{
params[key] = ex2();
}
}
return mapnik::datasource_cache::instance().create(params, bind);
}
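// Sketch of the intended Python-side call (class/method names match the
// bindings below; the dict contents are hypothetical):
//
//   mapnik.DatasourceCache.create({'type': 'shape', 'file': 'world.shp'})
//
// String values are tried first, then int, then double, and a "bind"
// key toggles immediate binding of the datasource.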
void register_datasources(std::string const& path)
{
mapnik::datasource_cache::instance().register_datasources(path);
}
std::vector<std::string> plugin_names()
{
return mapnik::datasource_cache::instance().plugin_names();
}
std::string plugin_directories()
{
return mapnik::datasource_cache::instance().plugin_directories();
}
}
void export_datasource_cache()<|fim▁hole|> .def("create",&create_datasource)
.staticmethod("create")
.def("register_datasources",®ister_datasources)
.staticmethod("register_datasources")
.def("plugin_names",&plugin_names)
.staticmethod("plugin_names")
.def("plugin_directories",&plugin_directories)
.staticmethod("plugin_directories")
;
}<|fim▁end|> | {
using mapnik::datasource_cache;
class_<datasource_cache,
boost::noncopyable>("DatasourceCache",no_init) |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// c4puter embedded controller firmware
// Copyright (C) 2017 Chris Pavlina
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
//! Utilities for processing and displaying data
mod parseint;<|fim▁hole|>mod hexprint;
pub mod base64;
pub mod utf;
pub use self::parseint::ParseInt;
pub use self::hexprint::hexprint;<|fim▁end|> | |
<|file_name|>Kingsoft.py<|end_file_name|><|fim▁begin|># Kingsoft Antivirus
# CVE-NOMATCH
import logging
<|fim▁hole|>
def SetUninstallName(self, arg):
if len(arg) > 900:
log.ThugLogging.log_exploit_event(self._window.url,
"Kingsoft AntiVirus ActiveX",
"SetUninstallName Heap Overflow")<|fim▁end|> | log = logging.getLogger("Thug")
|
<|file_name|>process_monitor.py<|end_file_name|><|fim▁begin|>import warnings
from . import pedrpc
from .base_monitor import BaseMonitor
# Important: BaseMonitor needs to come *before* pedrpc.Client in the
# Inheritance list for the method resolution order to produce
# correct results.
class ProcessMonitor(BaseMonitor, pedrpc.Client):
"""
Proxy class for the process monitor interface.
In versions < 0.2.0, boofuzz had network and process monitors
that communicated over RPC. The RPC client was passed directly
to the session class and resolved all method calls dynamically
on the RPC partner.
Since 0.2.0, every monitor class must implement the abstract class
BaseMonitor, which defines a common interface among all monitors. To
aid future type-hinting efforts and to disambiguate network and
process monitors, this explicit proxy class has been introduced; it
forwards all calls to the RPC partner.
.. versionadded:: 0.2.0
"""
def __init__(self, host, port):
BaseMonitor.__init__(self)
pedrpc.Client.__init__(self, host, port)
self.server_options = {}
self.host = host
self.port = port
def alive(self):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("alive")
def pre_send(self, target=None, fuzz_data_logger=None, session=None):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("pre_send", session.total_mutant_index)
def post_send(self, target=None, fuzz_data_logger=None, session=None):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("post_send")
def set_options(self, *args, **kwargs):
"""
The old RPC interfaces specified set_foobar methods to set options.
As these vary by RPC implementation, this trampoline method translates
arguments that have been passed as keyword arguments to set_foobar calls.
If you call ``set_options(foobar="barbaz")``, it will result in a call to
``set_foobar("barbaz")`` on the RPC partner.
"""
# args will be ignored, kwargs will be translated
for arg, value in kwargs.items():
eval("self.__method_missing('set_{0}', kwargs['{0}'])".format(arg))
self.server_options.update(**kwargs)
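# Illustrative only (host/port and option values are hypothetical):
#
#   monitor = ProcessMonitor("127.0.0.1", 26002)
#   monitor.set_options(proc_name="target.exe", crash_filename="crashes.txt")
#
# The loop above turns these into set_proc_name("target.exe") and
# set_crash_filename("crashes.txt") calls on the RPC daemon, and caches
# them in server_options so on_new_server() can replay them.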
def get_crash_synopsis(self):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("get_crash_synopsis")
def start_target(self):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("start_target")
def stop_target(self):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("stop_target")
def restart_target(self, target=None, fuzz_data_logger=None, session=None):
"""This method is forwarded to the RPC daemon."""
return self.__method_missing("restart_target")
def on_new_server(self, new_uuid):
"""Restores all set options to the RPC daemon if it has restarted since the last call."""
for key, val in self.server_options.items():
self.__hot_transmit(("set_{}".format(key), ((val,), {})))
def set_proc_name(self, new_proc_name):
""".. deprecated :: 0.2.0
This option should be set via ``set_options``.
"""
warnings.warn(
"This method is deprecated and will be removed in a future Version of boofuzz."
" Please use set_options(log_path=...) instead.",
FutureWarning,
)
return self.set_options(proc_name=new_proc_name)
def set_start_commands(self, new_start_commands):
""".. deprecated :: 0.2.0
This option should be set via ``set_options``.
"""
warnings.warn(
"This method is deprecated and will be removed in a future Version of boofuzz."
" Please use set_options(log_path=...) instead.",
FutureWarning,
)
return self.set_options(start_commands=new_start_commands)
def set_stop_commands(self, new_stop_commands):
""".. deprecated :: 0.2.0
This option should be set via ``set_options``.
"""
warnings.warn(
"This method is deprecated and will be removed in a future Version of boofuzz."
" Please use set_options(log_path=...) instead.",
FutureWarning,
)
return self.set_options(stop_commands=new_stop_commands)
def set_crash_filename(self, new_crash_filename):
""".. deprecated :: 0.2.0<|fim▁hole|> """
warnings.warn(
"This method is deprecated and will be removed in a future Version of boofuzz."
" Please use set_options(log_path=...) instead.",
FutureWarning,
)
return self.set_options(crash_filename=new_crash_filename)
def __repr__(self):
return "ProcessMonitor#{}[{}:{}]".format(id(self), self.host, self.port)<|fim▁end|> |
This option should be set via ``set_options``. |
<|file_name|>powerpc.rs<|end_file_name|><|fim▁begin|>use super::{InlineAsmArch, InlineAsmType};
use rustc_macros::HashStable_Generic;
use std::fmt;
def_reg_class! {
PowerPC PowerPCInlineAsmRegClass {
reg,
reg_nonzero,
freg,
cr,
xer,
}
}
impl PowerPCInlineAsmRegClass {
pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
&[]
}
pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
None
}
pub fn suggest_modifier(
self,
_arch: InlineAsmArch,
_ty: InlineAsmType,
) -> Option<(char, &'static str)> {
None
}
pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
None
}
pub fn supported_types(
self,
arch: InlineAsmArch,
) -> &'static [(InlineAsmType, Option<&'static str>)] {
match self {
Self::reg | Self::reg_nonzero => {
if arch == InlineAsmArch::PowerPC {
types! { _: I8, I16, I32; }
} else {
types! { _: I8, I16, I32, I64; }
}
}
Self::freg => types! { _: F32, F64; },
Self::cr | Self::xer => &[],
}
}
}
def_regs! {
PowerPC PowerPCInlineAsmReg PowerPCInlineAsmRegClass {
r0: reg = ["r0", "0"],
r3: reg, reg_nonzero = ["r3", "3"],
r4: reg, reg_nonzero = ["r4", "4"],
r5: reg, reg_nonzero = ["r5", "5"],
r6: reg, reg_nonzero = ["r6", "6"],
r7: reg, reg_nonzero = ["r7", "7"],
r8: reg, reg_nonzero = ["r8", "8"],
r9: reg, reg_nonzero = ["r9", "9"],
r10: reg, reg_nonzero = ["r10", "10"],
r11: reg, reg_nonzero = ["r11", "11"],
r12: reg, reg_nonzero = ["r12", "12"],
r14: reg, reg_nonzero = ["r14", "14"],
r15: reg, reg_nonzero = ["r15", "15"],
r16: reg, reg_nonzero = ["r16", "16"],
r17: reg, reg_nonzero = ["r17", "17"],
r18: reg, reg_nonzero = ["r18", "18"],
r19: reg, reg_nonzero = ["r19", "19"],
r20: reg, reg_nonzero = ["r20", "20"],
r21: reg, reg_nonzero = ["r21", "21"],
r22: reg, reg_nonzero = ["r22", "22"],
r23: reg, reg_nonzero = ["r23", "23"],
r24: reg, reg_nonzero = ["r24", "24"],
r25: reg, reg_nonzero = ["r25", "25"],
r26: reg, reg_nonzero = ["r26", "26"],
r27: reg, reg_nonzero = ["r27", "27"],
r28: reg, reg_nonzero = ["r28", "28"],
f0: freg = ["f0", "fr0"],
f1: freg = ["f1", "fr1"],
f2: freg = ["f2", "fr2"],
f3: freg = ["f3", "fr3"],
f4: freg = ["f4", "fr4"],
f5: freg = ["f5", "fr5"],
f6: freg = ["f6", "fr6"],
f7: freg = ["f7", "fr7"],
f8: freg = ["f8", "fr8"],
f9: freg = ["f9", "fr9"],
f10: freg = ["f10", "fr10"],
f11: freg = ["f11", "fr11"],
f12: freg = ["f12", "fr12"],
f13: freg = ["f13", "fr13"],
f14: freg = ["f14", "fr14"],
f15: freg = ["f15", "fr15"],
f16: freg = ["f16", "fr16"],
f17: freg = ["f17", "fr17"],
f18: freg = ["f18", "fr18"],
f19: freg = ["f19", "fr19"],
f20: freg = ["f20", "fr20"],
f21: freg = ["f21", "fr21"],
f22: freg = ["f22", "fr22"],
f23: freg = ["f23", "fr23"],
f24: freg = ["f24", "fr24"],
f25: freg = ["f25", "fr25"],
f26: freg = ["f26", "fr26"],
f27: freg = ["f27", "fr27"],
f28: freg = ["f28", "fr28"],<|fim▁hole|> f30: freg = ["f30", "fr30"],
f31: freg = ["f31", "fr31"],
cr: cr = ["cr"],
cr0: cr = ["cr0"],
cr1: cr = ["cr1"],
cr2: cr = ["cr2"],
cr3: cr = ["cr3"],
cr4: cr = ["cr4"],
cr5: cr = ["cr5"],
cr6: cr = ["cr6"],
cr7: cr = ["cr7"],
xer: xer = ["xer"],
#error = ["r1", "1", "sp"] =>
"the stack pointer cannot be used as an operand for inline asm",
#error = ["r2", "2"] =>
"r2 is a system reserved register and cannot be used as an operand for inline asm",
#error = ["r13", "13"] =>
"r13 is a system reserved register and cannot be used as an operand for inline asm",
#error = ["r29", "29"] =>
"r29 is used internally by LLVM and cannot be used as an operand for inline asm",
#error = ["r30", "30"] =>
"r30 is used internally by LLVM and cannot be used as an operand for inline asm",
#error = ["r31", "31", "fp"] =>
"the frame pointer cannot be used as an operand for inline asm",
#error = ["lr"] =>
"the link register cannot be used as an operand for inline asm",
#error = ["ctr"] =>
"the counter register cannot be used as an operand for inline asm",
#error = ["vrsave"] =>
"the vrsave register cannot be used as an operand for inline asm",
}
}
impl PowerPCInlineAsmReg {
pub fn emit(
self,
out: &mut dyn fmt::Write,
_arch: InlineAsmArch,
_modifier: Option<char>,
) -> fmt::Result {
macro_rules! do_emit {
(
$($(($reg:ident, $value:literal)),*;)*
) => {
out.write_str(match self {
$($(Self::$reg => $value,)*)*
})
};
}
// Strip off the leading prefix.
do_emit! {
(r0, "0"), (r3, "3"), (r4, "4"), (r5, "5"), (r6, "6"), (r7, "7");
(r8, "8"), (r9, "9"), (r10, "10"), (r11, "11"), (r12, "12"), (r14, "14"), (r15, "15");
(r16, "16"), (r17, "17"), (r18, "18"), (r19, "19"), (r20, "20"), (r21, "21"), (r22, "22"), (r23, "23");
(r24, "24"), (r25, "25"), (r26, "26"), (r27, "27"), (r28, "28");
(f0, "0"), (f1, "1"), (f2, "2"), (f3, "3"), (f4, "4"), (f5, "5"), (f6, "6"), (f7, "7");
(f8, "8"), (f9, "9"), (f10, "10"), (f11, "11"), (f12, "12"), (f13, "13"), (f14, "14"), (f15, "15");
(f16, "16"), (f17, "17"), (f18, "18"), (f19, "19"), (f20, "20"), (f21, "21"), (f22, "22"), (f23, "23");
(f24, "24"), (f25, "25"), (f26, "26"), (f27, "27"), (f28, "28"), (f29, "29"), (f30, "30"), (f31, "31");
(cr, "cr");
(cr0, "0"), (cr1, "1"), (cr2, "2"), (cr3, "3"), (cr4, "4"), (cr5, "5"), (cr6, "6"), (cr7, "7");
(xer, "xer");
}
}
pub fn overlapping_regs(self, mut cb: impl FnMut(PowerPCInlineAsmReg)) {
macro_rules! reg_conflicts {
(
$(
$full:ident : $($field:ident)*
),*;
) => {
match self {
$(
Self::$full => {
cb(Self::$full);
$(cb(Self::$field);)*
}
$(Self::$field)|* => {
cb(Self::$full);
cb(self);
}
)*
r => cb(r),
}
};
}
reg_conflicts! {
cr : cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7;
}
}
}<|fim▁end|> | f29: freg = ["f29", "fr29"], |
<|file_name|>multiprocessTask.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf-8
"""
multiprocessTask.py
~~~~~~~~~~~~~~~~~~~
a multiprocess model of producer/consumer
task = Task(work_func, 1, 3, counter=0, a='', callback=cb)
results = task.run()
for i in xrange(26):
lines = ["%d" % i] * random.randint(10, 20)
task.put(lines)
<|fim▁hole|>import time
from multiprocessing import Pool as ProcessPool, Manager, cpu_count
__all__ = ['Producer', 'Consumer', 'Task']
class Callable(object):
def __call__(self, *args, **kwargs):
raise NotImplementedError('%s not callable' % self)
def run(self, *args, **kwargs):
raise NotImplementedError('%s.run() not implemented' % self)
class Producer(Callable):
def __init__(self, todo_list=None, max_qsize=None):
manager = Manager()
self._q = manager.Queue()
self._q_lock = manager.Lock()
self._q_close_event = manager.Event()
self._max_qsize = max_qsize or 0
todo_list = todo_list or []
if isinstance(todo_list, (list, tuple)) and len(todo_list) > 0:
self.put(todo_list)
super(Producer, self).__init__()
@property
def q_size(self):
return self._q.qsize()
def __call__(self, q, lock, close_event, *args, **kwargs):
for i, data in enumerate(self.run()):
with lock:
q.put(data)
print 'pid %s put %d: %s' % (os.getpid(), i, data)
def run(self):
while 1:
with self._q_lock:
if self._q.empty():
if self._q_close_event.is_set():
break
else:
time.sleep(0.01)
continue
yield self._q.get()
def put(self, *todos):
for todo in todos:
with self._q_lock:
self._q.put(todo)
def finish(self):
try:
self._q_close_event.set()
except Exception as e:
print e
class Consumer(Callable):
def __init__(self, fn=None):
self._fn = fn
self.results = []
super(Consumer, self).__init__()
def __call__(self, q, lock, close_event, *args, **kwargs):
while 1:
with lock:
if q.empty():
if close_event.is_set():
break
else:
time.sleep(0.01)
continue
data = q.get()
self.results.append(self.run(data, *args, **kwargs))
return self.results
def run(self, data, *args, **kwargs):
if self._fn:
return self._fn(data, *args, **kwargs)
class Task(object):
"""
a multiprocess model of producer/consumer
"""
def __init__(self, fn,
producer_count=None,
consumer_count=None,
callback=None,
batch=True,
counter=None,
**shared
):
"""
init producer/consumer task
Args:
fn: consumer func, called as fn(data, counter, q_size, *args, **shared_vars)
producer_count: producer process count, default: 1
consumer_count: consumer process count, default: cpu_count - 1
callback: callback func invoked after consumer processing completes
batch: if True, `task.put(todo_list)` enqueues 'todo_list' to be handled all at once as one batch;
if False, the items of todo_list are handled one by one
counter: process-shared counter; custom handling needed in <fn>
**shared: process shared object data
"""
cpus = cpu_count()
if producer_count is None or producer_count < 1 or producer_count > cpu_count():
producer_count = 1
if consumer_count is None or consumer_count < 1 or consumer_count > cpu_count():
consumer_count = cpus - 1
print 'producer_count=%s consumer_count=%s' % (producer_count, consumer_count)
self._callback = callback
self.batch = batch
manager = Manager()
self.q = manager.Queue()
self.lock = manager.Lock()
self.event = manager.Event()
self._counter = manager.Value('counter', counter or 0)
self._shared = {var_name: manager.Value(var_name, var_value) for var_name, var_value in shared.iteritems()}
self.producerProcessList = [Producer() for _ in xrange(producer_count)]
self.consumerProcessList = [Consumer(fn=fn) for _ in xrange(consumer_count)]
self.pool = ProcessPool(consumer_count + producer_count)
@property
def q_size(self):
return self.q.qsize() + sum([x.q_size or 0 for x in self.producerProcessList])
@property
def counter(self):
return self._counter.value
@property
def shared(self):
return {var_name: var_value_proxy.value for var_name, var_value_proxy in self._shared.iteritems()}
def put(self, todo_list):
producer = self.producerProcessList.pop(0)
if self.batch:
producer.put(todo_list)
else:
producer.put(*todo_list)
self.producerProcessList.append(producer)
time.sleep(0.01)
def run(self, *args, **kwargs):
results = []
arg = (self.q, self.lock, self.event, self._counter, self.q_size)
kwargs.update(self._shared)
for producer in self.producerProcessList:
self.pool.apply_async(producer, arg + args, kwargs)
for consumer in self.consumerProcessList:
results.append(self.pool.apply_async(consumer, arg + args, kwargs, self._cb))
return results
def _cb(self, *args, **kwargs):
if self._callback:
self._callback(self.counter, self._shared)
def finish(self):
for producer in self.producerProcessList:
producer.finish()
self.pool.close()
time.sleep(0.03)
self.event.set()
self.pool.join()
# def work(data, counter, *args, **kwargs):
# pid = os.getpid()
# print '%s doing %s' % (pid, data)
# # counter = args[0] if len(args) > 0 else None
# if counter:
# counter.value += 1
# kwargs['var_a'].value += chr(len(kwargs['var_a'].value) + 65)
# return '%s result' % pid
#
#
# def cb(*args, **kwargs):
# print 'callback', args, kwargs
#
#
# def test():
# import random
# n = 0
# task = Task(work, 1, 3, counter=n, var_a='', callback=cb)
# results = task.run()
# for i in xrange(26):
# lines = ["%d" % i] * random.randint(10, 20)
# task.put(lines)
#
# task.finish()
#
# print 'end counter', task.counter
# print 'shared.var_a', task.shared['var_a']
# print 'results:\n' + '\n'.join([str(res.get()) for res in results])
#
# if __name__ == '__main__':
# test()<|fim▁end|> | task.finish()
"""
import os |
<|file_name|>file_test.go<|end_file_name|><|fim▁begin|>package commitstats
import (
"testing"
fixtures "github.com/src-d/go-git-fixtures"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
)
func TestNewFileStats(t *testing.T) {<|fim▁hole|> require.NoError(fixtures.Clean())
}()
f := fixtures.Basic().One()
r, err := git.Open(filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()), nil)
require.NoError(err)
b, err := r.BlobObject(plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492"))
require.NoError(err)
fs, err := newFileStats(b, "PHP")
require.NoError(err)
require.Equal(17, fs["}"].Count)
require.Equal(Code, fs["}"].Kind)
require.Equal(10, fs["*/"].Count)
require.Equal(Comment, fs["*/"].Kind)
}
func TestCalculateByFile(t *testing.T) {
defer func() {
require.NoError(t, fixtures.Clean())
}()
tests := map[string]struct {
fixture *fixtures.Fixture
from plumbing.Hash
to plumbing.Hash
expected interface{}
}{
"basic": {
fixture: fixtures.ByURL("https://github.com/src-d/go-git.git").One(),
to: plumbing.NewHash("d2d68d3413353bd4bf20891ac1daa82cd6e00fb9"),
expected: []CommitFileStats{
{
Path: "common_test.go",
Language: "Go",
Blank: KindStats{Deletions: 1},
Total: KindStats{Deletions: 1},
},
{
Path: "core/storage.go",
Language: "Go",
Code: KindStats{Additions: 1},
Total: KindStats{Additions: 1},
},
{
Path: "fixtures/data/pack-a3fed42da1e8189a077c0e6846c040dcf73fc9dd.idx",
},
{
Path: "fixtures/data/pack-a3fed42da1e8189a077c0e6846c040dcf73fc9dd.pack",
},
{
Path: "fixtures/data/pack-c544593473465e6315ad4182d04d366c4592b829.idx",
},
{
Path: "fixtures/data/pack-c544593473465e6315ad4182d04d366c4592b829.pack",
},
{
Path: "fixtures/data/pack-f2e0a8889a746f7600e07d2246a2e29a72f696be.idx",
},
{
Path: "fixtures/data/pack-f2e0a8889a746f7600e07d2246a2e29a72f696be.pack",
},
{
Path: "fixtures/fixtures.go",
Language: "Go",
Code: KindStats{Additions: 83},
Blank: KindStats{Additions: 19},
Total: KindStats{Additions: 102},
},
{
Path: "formats/idxfile/decoder.go",
Language: "Go",
Code: KindStats{Additions: 3, Deletions: 1},
Blank: KindStats{Deletions: 1},
Total: KindStats{Additions: 3, Deletions: 2},
},
{
Path: "formats/idxfile/decoder_test.go",
Language: "Go",
Code: KindStats{Additions: 31, Deletions: 11},
Blank: KindStats{Additions: 7},
Total: KindStats{Additions: 38, Deletions: 11},
},
{
Path: "formats/idxfile/encoder.go",
Language: "Go",
Code: KindStats{Additions: 8, Deletions: 9},
Total: KindStats{Additions: 8, Deletions: 9},
},
{
Path: "formats/idxfile/encoder_test.go",
Language: "Go",
Code: KindStats{Additions: 16, Deletions: 27},
Comment: KindStats{Deletions: 0},
Blank: KindStats{Deletions: 3},
Other: KindStats{Deletions: 0},
Total: KindStats{Additions: 16,
Deletions: 30},
},
{
Path: "formats/idxfile/fixtures/git-fixture.idx",
},
{
Path: "formats/idxfile/idxfile.go",
Language: "Go",
Code: KindStats{Additions: 8, Deletions: 1},
Blank: KindStats{Additions: 1},
Total: KindStats{Additions: 9, Deletions: 1},
},
{
Path: "formats/packfile/decoder.go",
Language: "Go",
Code: KindStats{Additions: 56, Deletions: 70},
Comment: KindStats{Additions: 2, Deletions: 9},
Blank: KindStats{Deletions: 4},
Total: KindStats{Additions: 58, Deletions: 83},
},
{
Path: "formats/packfile/decoder_test.go",
Language: "Go",
Code: KindStats{Additions: 23, Deletions: 45},
Blank: KindStats{Deletions: 3},
Total: KindStats{Additions: 23, Deletions: 48},
},
{
Path: "formats/packfile/parser.go",
Language: "Go",
Code: KindStats{Additions: 53, Deletions: 15},
Blank: KindStats{Additions: 9},
Total: KindStats{Additions: 62, Deletions: 15},
},
{
Path: "formats/packfile/parser_test.go",
Language: "Go",
Code: KindStats{Additions: 91, Deletions: 59},
Comment: KindStats{Deletions: 328},
Blank: KindStats{Deletions: 53},
Total: KindStats{Additions: 91, Deletions: 440},
},
{
Path: "storage/filesystem/internal/dotgit/dotgit.go",
Language: "Go",
Code: KindStats{Additions: 23, Deletions: 22},
Blank: KindStats{Additions: 2},
Total: KindStats{Additions: 25, Deletions: 22},
},
{
Path: "storage/filesystem/internal/index/index.go",
Language: "Go",
Code: KindStats{Additions: 8, Deletions: 4},
Total: KindStats{Additions: 8, Deletions: 4},
},
{
Path: "storage/filesystem/object.go",
Language: "Go",
Code: KindStats{Additions: 3},
Blank: KindStats{Additions: 1},
Total: KindStats{Additions: 4},
},
{
Path: "storage/memory/storage.go",
Language: "Go",
Code: KindStats{Additions: 7},
Blank: KindStats{Additions: 3},
Total: KindStats{Additions: 10},
},
},
},
"orphan": {
fixture: fixtures.Basic().One(),
to: plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
expected: []CommitFileStats{
{
Path: "LICENSE",
Language: "Text",
Other: KindStats{Additions: 22},
Total: KindStats{Additions: 22},
},
},
},
"other": {
fixture: fixtures.Basic().One(),
to: plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
expected: []CommitFileStats{
{
Path: "CHANGELOG",
Other: KindStats{Additions: 1},
Total: KindStats{Additions: 1},
},
},
},
"binary": {
fixture: fixtures.Basic().One(),
to: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
expected: []CommitFileStats{{Path: "binary.jpg"}},
},
"vendor": {
fixture: fixtures.Basic().One(),
to: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
expected: ([]CommitFileStats)(nil),
},
"with_from": {
fixture: fixtures.Basic().One(),
to: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
from: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
expected: ([]CommitFileStats)(nil),
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
require := require.New(t)
r, err := git.Open(filesystem.NewStorage(test.fixture.DotGit(), cache.NewObjectLRUDefault()), nil)
require.NoError(err)
to, err := r.CommitObject(test.to)
require.NoError(err)
var from *object.Commit
if !test.from.IsZero() {
from, err = r.CommitObject(test.from)
require.NoError(err)
}
stats, err := CalculateByFile(r, from, to)
require.NoError(err)
assert.Equal(t, test.expected, stats)
})
}
}<|fim▁end|> | require := require.New(t)
defer func() { |
<|file_name|>neural_net_tester.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2.5
#
# Unit tester for neural_net.py
#
import sys
from neural_net import train, test,\
make_neural_net_basic,\
make_neural_net_two_layer,\
make_neural_net_challenging,\
make_neural_net_with_weights
from neural_net_data import simple_data_sets,\
harder_data_sets,\
challenging_data_sets,\
manual_weight_data_sets,\
all_data_sets
def main(neural_net_func, data_sets, max_iterations=10000):
verbose = True
for name, training_data, test_data in data_sets:
print "-"*40
print "Training on %s data" %(name)
nn = neural_net_func()
train(nn, training_data, max_iterations=max_iterations,
verbose=verbose)
print "Trained weights:"
for w in nn.weights:
print "Weight '%s': %f"%(w.get_name(),w.get_value())
print "Testing on %s test-data" %(name)
result = test(nn, test_data, verbose=verbose)<|fim▁hole|>if __name__=="__main__":
test_names = ["simple"]
if len(sys.argv) > 1:
test_names = sys.argv[1:]
for test_name in test_names:
if test_name == "simple":
# these test simple logical configurations
main(make_neural_net_basic,
simple_data_sets)
elif test_name == "two_layer":
# these test cases are slightly harder
main(make_neural_net_two_layer,
simple_data_sets + harder_data_sets)
elif test_name == "challenging":
# these tests require a more complex architecture.
main(make_neural_net_challenging, challenging_data_sets)
elif test_name == "patchy":
# patchy problem is slightly tricky
# unless your network gets the right weights.
# it can quickly get stuck in local maxima.
main(make_neural_net_challenging, manual_weight_data_sets)
elif test_name == "weights":
# if you set the 'right' weights for
# the patchy problem it can converge very quickly.
main(make_neural_net_with_weights, manual_weight_data_sets,100)
else:
print "unrecognized test name %s" %(test_name)<|fim▁end|> | print "Accuracy: %f"%(result)
|
<|file_name|>couchdb_indexer.go<|end_file_name|><|fim▁begin|>package vfs
import (
"encoding/json"
"os"
"path"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/couchdb/mango"
)
type couchdbIndexer struct {
db couchdb.Database
}
// NewCouchdbIndexer creates an Indexer instance based on couchdb to store
// files and directories metadata and index them.
func NewCouchdbIndexer(db couchdb.Database) Indexer {
return &couchdbIndexer{
db: db,
}
}<|fim▁hole|> Type: consts.DirType,
DocID: consts.RootDirID,
Fullpath: "/",
DirID: "",
})
if err != nil {
return err
}
err = couchdb.CreateNamedDocWithDB(c.db, &DirDoc{
DocName: path.Base(TrashDirName),
Type: consts.DirType,
DocID: consts.TrashDirID,
Fullpath: TrashDirName,
DirID: consts.RootDirID,
})
if err != nil && !couchdb.IsConflictError(err) {
return err
}
return nil
}
func (c *couchdbIndexer) DiskUsage() (int64, error) {
var doc couchdb.ViewResponse
err := couchdb.ExecView(c.db, consts.DiskUsageView, &couchdb.ViewRequest{
Reduce: true,
}, &doc)
if err != nil {
return 0, err
}
if len(doc.Rows) == 0 {
return 0, nil
}
// Reduce of _count should give us a number value
f64, ok := doc.Rows[0].Value.(float64)
if !ok {
return 0, ErrWrongCouchdbState
}
return int64(f64), nil
}
func (c *couchdbIndexer) CreateFileDoc(doc *FileDoc) error {
// Ensure that fullpath is filled because it's used in realtime/@events
if _, err := doc.Path(c); err != nil {
return err
}
return couchdb.CreateDoc(c.db, doc)
}
func (c *couchdbIndexer) CreateNamedFileDoc(doc *FileDoc) error {
// Ensure that fullpath is filled because it's used in realtime/@events
if _, err := doc.Path(c); err != nil {
return err
}
return couchdb.CreateNamedDoc(c.db, doc)
}
func (c *couchdbIndexer) UpdateFileDoc(olddoc, newdoc *FileDoc) error {
// Ensure that fullpath is filled because it's used in realtime/@events
if _, err := olddoc.Path(c); err != nil {
return err
}
if _, err := newdoc.Path(c); err != nil {
return err
}
newdoc.SetID(olddoc.ID())
newdoc.SetRev(olddoc.Rev())
return couchdb.UpdateDoc(c.db, newdoc)
}
func (c *couchdbIndexer) UpdateFileDocs(docs []*FileDoc) error {
if len(docs) == 0 {
return nil
}
// Ensure that fullpath is filled because it's used in realtime/@events
couchdocs := make([]interface{}, len(docs))
for i, doc := range docs {
if _, err := doc.Path(c); err != nil {
return err
}
couchdocs[i] = doc
}
return couchdb.BulkUpdateDocs(c.db, consts.Files, couchdocs)
}
func (c *couchdbIndexer) DeleteFileDoc(doc *FileDoc) error {
// Ensure that fullpath is filled because it's used in realtime/@events
if _, err := doc.Path(c); err != nil {
return err
}
return couchdb.DeleteDoc(c.db, doc)
}
func (c *couchdbIndexer) CreateDirDoc(doc *DirDoc) error {
return couchdb.CreateDoc(c.db, doc)
}
func (c *couchdbIndexer) CreateNamedDirDoc(doc *DirDoc) error {
return couchdb.CreateNamedDoc(c.db, doc)
}
func (c *couchdbIndexer) UpdateDirDoc(olddoc, newdoc *DirDoc) error {
newdoc.SetID(olddoc.ID())
newdoc.SetRev(olddoc.Rev())
if newdoc.Fullpath != olddoc.Fullpath {
if err := c.moveDir(olddoc.Fullpath, newdoc.Fullpath); err != nil {
return err
}
}
return couchdb.UpdateDoc(c.db, newdoc)
}
func (c *couchdbIndexer) DeleteDirDoc(doc *DirDoc) error {
return couchdb.DeleteDoc(c.db, doc)
}
func (c *couchdbIndexer) moveDir(oldpath, newpath string) error {
var children []*DirDoc
sel := mango.StartWith("path", oldpath+"/")
req := &couchdb.FindRequest{
UseIndex: "dir-by-path",
Selector: sel,
}
err := couchdb.FindDocs(c.db, consts.Files, req, &children)
if err != nil || len(children) == 0 {
return err
}
couchdocs := make([]interface{}, len(children))
for i, child := range children {
child.Fullpath = path.Join(newpath, child.Fullpath[len(oldpath)+1:])
couchdocs[i] = child
}
return couchdb.BulkUpdateDocs(c.db, consts.Files, couchdocs)
}
func (c *couchdbIndexer) DirByID(fileID string) (*DirDoc, error) {
doc := &DirDoc{}
err := couchdb.GetDoc(c.db, consts.Files, fileID, doc)
if couchdb.IsNotFoundError(err) {
err = os.ErrNotExist
}
if err != nil {
if fileID == consts.RootDirID {
panic("Root directory is not in database")
}
if fileID == consts.TrashDirID {
panic("Trash directory is not in database")
}
return nil, err
}
if doc.Type != consts.DirType {
return nil, os.ErrNotExist
}
return doc, err
}
func (c *couchdbIndexer) DirByPath(name string) (*DirDoc, error) {
if !path.IsAbs(name) {
return nil, ErrNonAbsolutePath
}
var docs []*DirDoc
sel := mango.Equal("path", path.Clean(name))
req := &couchdb.FindRequest{
UseIndex: "dir-by-path",
Selector: sel,
Limit: 1,
}
err := couchdb.FindDocs(c.db, consts.Files, req, &docs)
if err != nil {
return nil, err
}
if len(docs) == 0 {
if name == "/" {
panic("Root directory is not in database")
}
return nil, os.ErrNotExist
}
return docs[0], nil
}
func (c *couchdbIndexer) FileByID(fileID string) (*FileDoc, error) {
doc := &FileDoc{}
err := couchdb.GetDoc(c.db, consts.Files, fileID, doc)
if couchdb.IsNotFoundError(err) {
return nil, os.ErrNotExist
}
if err != nil {
return nil, err
}
if doc.Type != consts.FileType {
return nil, os.ErrNotExist
}
return doc, nil
}
func (c *couchdbIndexer) FileByPath(name string) (*FileDoc, error) {
if !path.IsAbs(name) {
return nil, ErrNonAbsolutePath
}
parent, err := c.DirByPath(path.Dir(name))
if err != nil {
return nil, err
}
// consts.FilesByParentView keys are [parentID, type, name]
var res couchdb.ViewResponse
err = couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{
Key: []string{parent.DocID, consts.FileType, path.Base(name)},
IncludeDocs: true,
}, &res)
if err != nil {
return nil, err
}
if len(res.Rows) == 0 {
return nil, os.ErrNotExist
}
var fdoc FileDoc
err = json.Unmarshal(*res.Rows[0].Doc, &fdoc)
return &fdoc, err
}
func (c *couchdbIndexer) FilePath(doc *FileDoc) (string, error) {
var parentPath string
if doc.DirID == consts.RootDirID {
parentPath = "/"
} else if doc.DirID == consts.TrashDirID {
parentPath = TrashDirName
} else {
parent, err := c.DirByID(doc.DirID)
if err != nil {
return "", ErrParentDoesNotExist
}
parentPath = parent.Fullpath
}
return path.Join(parentPath, doc.DocName), nil
}
func (c *couchdbIndexer) DirOrFileByID(fileID string) (*DirDoc, *FileDoc, error) {
dirOrFile := &DirOrFileDoc{}
err := couchdb.GetDoc(c.db, consts.Files, fileID, dirOrFile)
if err != nil {
return nil, nil, err
}
dirDoc, fileDoc := dirOrFile.Refine()
return dirDoc, fileDoc, nil
}
func (c *couchdbIndexer) DirOrFileByPath(name string) (*DirDoc, *FileDoc, error) {
dirDoc, err := c.DirByPath(name)
if err != nil && !os.IsNotExist(err) {
return nil, nil, err
}
if err == nil {
return dirDoc, nil, nil
}
fileDoc, err := c.FileByPath(name)
if err != nil && !os.IsNotExist(err) {
return nil, nil, err
}
if err == nil {
return nil, fileDoc, nil
}
return nil, nil, err
}
func (c *couchdbIndexer) DirIterator(doc *DirDoc, opts *IteratorOptions) DirIterator {
return NewIterator(c.db, doc, opts)
}
func (c *couchdbIndexer) DirBatch(doc *DirDoc, cursor couchdb.Cursor) ([]DirOrFileDoc, error) {
// consts.FilesByParentView keys are [parentID, type, name]
req := couchdb.ViewRequest{
StartKey: []string{doc.DocID, ""},
EndKey: []string{doc.DocID, couchdb.MaxString},
IncludeDocs: true,
}
var res couchdb.ViewResponse
cursor.ApplyTo(&req)
err := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)
if err != nil {
return nil, err
}
cursor.UpdateFrom(&res)
docs := make([]DirOrFileDoc, len(res.Rows))
for i, row := range res.Rows {
var doc DirOrFileDoc
err := json.Unmarshal(*row.Doc, &doc)
if err != nil {
return nil, err
}
docs[i] = doc
}
return docs, nil
}
func (c *couchdbIndexer) DirLength(doc *DirDoc) (int, error) {
req := couchdb.ViewRequest{
StartKey: []string{doc.DocID, ""},
EndKey: []string{doc.DocID, couchdb.MaxString},
Reduce: true,
GroupLevel: 1,
}
var res couchdb.ViewResponse
err := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)
if err != nil {
return 0, err
}
if len(res.Rows) == 0 {
return 0, nil
}
// Reduce of _count should give us a number value
f64, ok := res.Rows[0].Value.(float64)
if !ok {
return 0, ErrWrongCouchdbState
}
return int(f64), nil
}
func (c *couchdbIndexer) DirChildExists(dirID, name string) (bool, error) {
var res couchdb.ViewResponse
// consts.FilesByParentView keys are [parentID, type, name]
err := couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{
Keys: []interface{}{
[]string{dirID, consts.FileType, name},
[]string{dirID, consts.DirType, name},
},
Reduce: true,
Group: true,
}, &res)
if err != nil {
return false, err
}
if len(res.Rows) == 0 {
return false, nil
}
// Reduce of _count should give us a number value
f64, ok := res.Rows[0].Value.(float64)
if !ok {
return false, ErrWrongCouchdbState
}
return int(f64) > 0, nil
}<|fim▁end|> |
func (c *couchdbIndexer) InitIndex() error {
err := couchdb.CreateNamedDocWithDB(c.db, &DirDoc{
DocName: "", |
<|file_name|>testReturnPlanParser.cpp<|end_file_name|><|fim▁begin|>/* DMCS -- Distributed Nonmonotonic Multi-Context Systems.
* Copyright (C) 2009, 2010 Minh Dao-Tran, Thomas Krennwallner
*
* This file is part of DMCS.
*
* DMCS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* DMCS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with DMCS. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file testReturnPlanParser.cpp
* @author Minh Dao-Tran <[email protected]>
* @date Tue Oct 23 17:49:26 2012
*
* @brief
*
*
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "testReturnPlanParser"
#include <boost/test/unit_test.hpp>
#include "mcs/BeliefStateOffset.h"
#include "parser/ReturnPlanGrammar.hpp"
#include "parser/ReturnPlanGrammar.tcc"
#include "parser/Parser.hpp"
#include "parser/Parser.tcc"
#include <sstream>
using namespace dmcs;
BOOST_AUTO_TEST_CASE ( testReturnPlanParser )<|fim▁hole|> std::size_t bs_size = 10;
const char *ex = getenv("EXAMPLESDIR");
assert (ex != 0);
BeliefStateOffset* bso_instance = BeliefStateOffset::create(system_size, bs_size);
for (std::size_t i = 0; i <= 0; ++i)
{
std::stringstream out;
out << i;
std::string returnplan_file(ex);
returnplan_file += "/returnPlanParserTest" + out.str() + ".txt";
ReturnPlanParser_t returnplan_parser;
ReturnPlanMapPtr rpm = returnplan_parser.parseFile(returnplan_file);
std::cout << "Parsing return plans for context " << i << ". Got return interfaces:" << std::endl;
for (ReturnPlanMap::const_iterator it = rpm->begin(); it != rpm->end(); ++it)
{
std::size_t parent_id = it->first;
NewBeliefState* interface = it->second;
assert (interface != 0);
std::cout << parent_id << " " << *interface << std::endl;
}
std::cout << std::endl;
}
}
// Local Variables:
// mode: C++
// End:<|fim▁end|> | {
std::size_t system_size = 10; |
<|file_name|>data_diesel_sqlite.rs<|end_file_name|><|fim▁begin|>extern crate diesel;
extern crate dotenv;
use std::env;
use std::sync::Arc;
use crate::models::*;
use crate::result::{
Action, DataError, DataReadyResult, DataResult, IntoDataReadyResult, IntoDataResult, SpokenOk,
};
use crate::schema::logs::dsl::*;
use crate::schema::rides::dsl::*;
use crate::schema::routes::dsl::*;
use crate::spoken::SpokenData;
use async_trait::async_trait;
use diesel::prelude::*;
use diesel::r2d2;
use diesel::sqlite::SqliteConnection;
use dotenv::dotenv;
use futures::{executor::ThreadPool, future::lazy, task::SpawnExt};
use serde::Serialize;
impl<T> IntoDataResult<T, diesel::result::Error> for Result<T, diesel::result::Error> {
fn data_result(self) -> DataResult<T> {
self.map_err(|e| e.into())
}
}
impl<T> IntoDataResult<T, r2d2::Error> for Result<T, r2d2::PoolError> {
fn data_result(self) -> DataResult<T> {
self.map_err(|e| e.into())
}
}
impl<T: Serialize> IntoDataReadyResult<T, diesel::result::Error>
for Result<T, diesel::result::Error>
{
fn data_ready_result(self, action: Action) -> DataReadyResult<T> {
self.map(|r| SpokenOk::new(action, r)).map_err(|e| e.into())
}
}
impl From<r2d2::PoolError> for DataError {
fn from(from: r2d2::PoolError) -> Self {
Self::DbConn(Box::new(from))
}
}
impl From<diesel::result::Error> for DataError {
fn from(from: diesel::result::Error) -> Self {
use diesel::result::DatabaseErrorKind::*;
use diesel::result::Error::*;
match &from {
DatabaseError(kind, _err_info) => match kind {
UniqueViolation => Self::InvalidQuery(Box::new(from)),
ForeignKeyViolation => Self::InvalidQuery(Box::new(from)),
UnableToSendCommand => Self::InvalidQuery(Box::new(from)),
SerializationFailure => Self::InvalidQuery(Box::new(from)),
__Unknown => Self::Unknown,
},
NotFound => Self::NotFound(Box::new(from)),
QueryBuilderError(_) => Self::InvalidQuery(Box::new(from)),
DeserializationError(_) => Self::InvalidQuery(Box::new(from)),
SerializationError(_) => Self::InvalidQuery(Box::new(from)),
_ => Self::InvalidQuery(Box::new(from)),
}
}
}
type ConnectionManager = diesel::r2d2::ConnectionManager<SqliteConnection>;
#[derive(Clone)]
pub struct DieselSqlite {
db_pool: Arc<diesel::r2d2::Pool<ConnectionManager>>,
worker_pool: ThreadPool,
}
impl DieselSqlite {
pub fn open() -> DieselSqlite {
dotenv().ok();
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let db_pool = Arc::new(
r2d2::Pool::builder()
.build(r2d2::ConnectionManager::new(&database_url))
.unwrap(),
);
let worker_pool = ThreadPool::builder()
.pool_size(4)
.name_prefix("spoken-")
.create()
.expect("thread pool");
DieselSqlite {
db_pool,
worker_pool,
}
}
pub fn get_conn(
&self,
) -> Result<diesel::r2d2::PooledConnection<ConnectionManager>, diesel::r2d2::PoolError> {
Ok(self.db_pool.get()?)<|fim▁hole|> &self,
) -> Result<diesel::r2d2::PooledConnection<ConnectionManager>, diesel::r2d2::PoolError> {
let pool = Arc::clone(&self.db_pool);
self.worker_pool
.spawn_with_handle(lazy(move |_| pool.get()))
.unwrap_or_else(|e| panic!("failed to spawn for a new connection; {:?}", e))
.await
}
}
#[async_trait]
impl SpokenData for DieselSqlite {
async fn list_logs(&self) -> DataReadyResult<Vec<Log>> {
let conn = self.get_conn_async().await.data_result()?;
self.worker_pool
.spawn_with_handle(lazy(move |_| logs.load::<Log>(&conn)))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.data_ready_result(Action::List)
}
async fn get_log(&self, log_name: &str) -> DataReadyResult<Log> {
let conn = self.get_conn_async().await.data_result()?;
let log_name = log_name.to_owned();
self.worker_pool
.spawn_with_handle(lazy(move |_| {
logs.filter(crate::schema::logs::name.eq(log_name))
.first::<Log>(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.data_ready_result(Action::Get)
}
async fn post_log(&self, new_log: NewLog) -> DataReadyResult<()> {
let conn = self.get_conn_async().await.data_result()?;
self.worker_pool
.spawn_with_handle(lazy(move |_| {
diesel::insert_into(logs).values(&new_log).execute(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.map(|_| ())
.data_ready_result(Action::Create)
}
async fn list_routes(&self) -> DataReadyResult<Vec<Route>> {
let conn = self.get_conn_async().await.data_result()?;
self.worker_pool
.spawn_with_handle(lazy(move |_| routes.load::<Route>(&conn)))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.data_ready_result(Action::List)
}
async fn post_route(&self, new_route: NewRoute) -> DataReadyResult<()> {
let conn = self.get_conn_async().await.data_result()?;
self.worker_pool
.spawn_with_handle(lazy(move |_| {
diesel::insert_into(routes)
.values(&new_route)
.execute(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.map(|_| ())
.data_ready_result(Action::Create)
}
async fn list_rides_in_log(&self, log: Log) -> DataReadyResult<Vec<Ride>> {
let conn = self.get_conn_async().await.data_result()?;
self.worker_pool
.spawn_with_handle(lazy(move |_| {
Ride::belonging_to(&log)
.order(timestamp.desc())
.load::<Ride>(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.data_ready_result(Action::List)
}
async fn get_route_avgs_in_log(
&self,
log: Log,
route_name: &str,
) -> DataReadyResult<Vec<RideAverage>> {
let conn = self.get_conn_async().await.data_result()?;
let route_name = route_name.to_owned();
self.worker_pool
.spawn_with_handle(lazy(move |_| {
RideAverage::belonging_to(&log)
.select((
crate::schema::rides::id,
log_id,
route,
crate::schema::rides::average,
))
.filter(route.eq(route_name))
.load::<RideAverage>(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.data_ready_result(Action::List)
}
async fn post_ride(&self, log: Log, mut ride: NewRide) -> DataReadyResult<()> {
let conn = self.get_conn_async().await.data_result()?;
ride.log_id = log.id;
self.worker_pool
.spawn_with_handle(lazy(move |_| {
diesel::insert_into(rides).values(&ride).execute(&conn)
}))
.unwrap_or_else(|e| panic!("failed to spawn; {:?}", e))
.await
.map(|_| ())
.data_ready_result(Action::Create)
}
}<|fim▁end|> | }
pub async fn get_conn_async( |
<|file_name|>animated_properties.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<%
from data import to_idl_name, SYSTEM_FONT_LONGHANDS, to_camel_case
from itertools import groupby
%>
#[cfg(feature = "gecko")] use crate::gecko_bindings::structs::nsCSSPropertyID;
use itertools::{EitherOrBoth, Itertools};
use crate::properties::{CSSWideKeyword, PropertyDeclaration, NonCustomPropertyIterator};
use crate::properties::longhands;
use crate::properties::longhands::visibility::computed_value::T as Visibility;
use crate::properties::LonghandId;
use servo_arc::Arc;
use smallvec::SmallVec;
use std::ptr;
use std::mem;
use crate::hash::FxHashMap;
use super::ComputedValues;
use crate::values::animated::{Animate, Procedure, ToAnimatedValue, ToAnimatedZero};
use crate::values::animated::effects::AnimatedFilter;
#[cfg(feature = "gecko")] use crate::values::computed::TransitionProperty;
use crate::values::computed::{ClipRect, Context};
use crate::values::computed::ToComputedValue;
use crate::values::distance::{ComputeSquaredDistance, SquaredDistance};
use crate::values::generics::effects::Filter;
use void::{self, Void};
/// Convert nsCSSPropertyID to TransitionProperty
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl From<nsCSSPropertyID> for TransitionProperty {
fn from(property: nsCSSPropertyID) -> TransitionProperty {
use properties::ShorthandId;
match property {
% for prop in data.longhands:
${prop.nscsspropertyid()} => {
TransitionProperty::Longhand(LonghandId::${prop.camel_case})
}
% endfor
% for prop in data.shorthands_except_all():
${prop.nscsspropertyid()} => {
TransitionProperty::Shorthand(ShorthandId::${prop.camel_case})
}
% endfor
nsCSSPropertyID::eCSSPropertyExtra_all_properties => {
TransitionProperty::Shorthand(ShorthandId::All)
}
_ => {
panic!("non-convertible nsCSSPropertyID")
}
}
}
}
/// A collection of AnimationValue that were composed on an element.
/// This HashMap stores the values that are the last AnimationValue to be
/// composed for each TransitionProperty.
pub type AnimationValueMap = FxHashMap<LonghandId, AnimationValue>;
/// An enum to represent a single computed value belonging to an animated
/// property in order to be interpolated with another one. When interpolating,
/// both values need to belong to the same property.
///
/// FIXME: We need to add a path for custom properties, but that's trivial after
/// this (is a similar path to that of PropertyDeclaration).
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
#[derive(Debug)]
#[repr(u16)]
pub enum AnimationValue {
% for prop in data.longhands:
/// `${prop.name}`
% if prop.animatable and not prop.logical:
${prop.camel_case}(${prop.animated_type()}),
% else:
${prop.camel_case}(Void),
% endif
% endfor
}
<%
animated = []
unanimated = []
animated_with_logical = []
for prop in data.longhands:
if prop.animatable:
animated_with_logical.append(prop)
if prop.animatable and not prop.logical:
animated.append(prop)
else:
unanimated.append(prop)
%>
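// Mirrors the in-memory layout of one AnimationValue variant: the u16
// discriminant followed by the payload. The unsafe casts below rely on the
// layout guarantees of the #[repr(u16)] enum above.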
#[repr(C)]
struct AnimationValueVariantRepr<T> {
tag: u16,
value: T
}
impl Clone for AnimationValue {
#[inline]
fn clone(&self) -> Self {
use self::AnimationValue::*;
<%
[copy, others] = [list(g) for _, g in groupby(animated, key=lambda x: not x.specified_is_copy())]
%>
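        // Variants with Copy payloads are grouped first in the enum, so any tag at
        // or below the last Copy variant can be cloned with a plain bitwise copy.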
let self_tag = unsafe { *(self as *const _ as *const u16) };
if self_tag <= LonghandId::${copy[-1].camel_case} as u16 {
#[derive(Clone, Copy)]
#[repr(u16)]
enum CopyVariants {
% for prop in copy:
_${prop.camel_case}(${prop.animated_type()}),
% endfor
}
unsafe {
let mut out = mem::MaybeUninit::uninit();
ptr::write(
out.as_mut_ptr() as *mut CopyVariants,
*(self as *const _ as *const CopyVariants),
);
return out.assume_init();
}
}
match *self {
% for ty, props in groupby(others, key=lambda x: x.animated_type()):
<% props = list(props) %>
${" |\n".join("{}(ref value)".format(prop.camel_case) for prop in props)} => {
% if len(props) == 1:
${props[0].camel_case}(value.clone())
% else:
unsafe {
let mut out = mem::MaybeUninit::uninit();
ptr::write(
out.as_mut_ptr() as *mut AnimationValueVariantRepr<${ty}>,
AnimationValueVariantRepr {
tag: *(self as *const _ as *const u16),
value: value.clone(),
},
);
out.assume_init()
}
% endif
}
% endfor
_ => unsafe { debug_unreachable!() }
}
}
}
impl PartialEq for AnimationValue {
#[inline]
fn eq(&self, other: &Self) -> bool {
use self::AnimationValue::*;
unsafe {
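            // Matching discriminants guarantee both payloads have the same type,
            // making the cast through AnimationValueVariantRepr below sound.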
let this_tag = *(self as *const _ as *const u16);
let other_tag = *(other as *const _ as *const u16);
if this_tag != other_tag {
return false;
}
match *self {
% for ty, props in groupby(animated, key=lambda x: x.animated_type()):
${" |\n".join("{}(ref this)".format(prop.camel_case) for prop in props)} => {
let other_repr =
&*(other as *const _ as *const AnimationValueVariantRepr<${ty}>);
*this == other_repr.value
}
% endfor
${" |\n".join("{}(void)".format(prop.camel_case) for prop in unanimated)} => {
void::unreachable(void)
}
}
}
}
}
impl AnimationValue {
/// Returns the longhand id this animated value corresponds to.
#[inline]
pub fn id(&self) -> LonghandId {
let id = unsafe { *(self as *const _ as *const LonghandId) };
debug_assert_eq!(id, match *self {
% for prop in data.longhands:
% if prop.animatable and not prop.logical:
AnimationValue::${prop.camel_case}(..) => LonghandId::${prop.camel_case},
% else:
AnimationValue::${prop.camel_case}(void) => void::unreachable(void),
% endif
% endfor
});
id
}
/// "Uncompute" this animation value in order to be used inside the CSS
/// cascade.
pub fn uncompute(&self) -> PropertyDeclaration {
use crate::properties::longhands;
use self::AnimationValue::*;
use super::PropertyDeclarationVariantRepr;
match *self {
<% keyfunc = lambda x: (x.base_type(), x.specified_type(), x.boxed, x.is_animatable_with_computed_value) %>
% for (ty, specified, boxed, computed), props in groupby(animated, key=keyfunc):
<% props = list(props) %>
${" |\n".join("{}(ref value)".format(prop.camel_case) for prop in props)} => {
% if not computed:
let ref value = ToAnimatedValue::from_animated_value(value.clone());
% endif
let value = ${ty}::from_computed_value(&value);
% if boxed:
let value = Box::new(value);
% endif
% if len(props) == 1:
PropertyDeclaration::${props[0].camel_case}(value)
% else:
unsafe {
let mut out = mem::MaybeUninit::uninit();
ptr::write(
out.as_mut_ptr() as *mut PropertyDeclarationVariantRepr<${specified}>,
PropertyDeclarationVariantRepr {
tag: *(self as *const _ as *const u16),
value,
},
);
out.assume_init()
}
% endif
}
% endfor
${" |\n".join("{}(void)".format(prop.camel_case) for prop in unanimated)} => {
void::unreachable(void)
}
}
}
/// Construct an AnimationValue from a property declaration.
pub fn from_declaration(
decl: &PropertyDeclaration,
context: &mut Context,
extra_custom_properties: Option<<&Arc<crate::custom_properties::CustomPropertiesMap>>,
initial: &ComputedValues
) -> Option<Self> {
use super::PropertyDeclarationVariantRepr;
<%
keyfunc = lambda x: (
x.specified_type(),
x.animated_type(),
x.boxed,
not x.is_animatable_with_computed_value,
x.style_struct.inherited,
x.ident in SYSTEM_FONT_LONGHANDS and engine == "gecko",
)
%>
let animatable = match *decl {
% for (specified_ty, ty, boxed, to_animated, inherit, system), props in groupby(animated_with_logical, key=keyfunc):
${" |\n".join("PropertyDeclaration::{}(ref value)".format(prop.camel_case) for prop in props)} => {
let decl_repr = unsafe {
&*(decl as *const _ as *const PropertyDeclarationVariantRepr<${specified_ty}>)
};
let longhand_id = unsafe {
*(&decl_repr.tag as *const u16 as *const LonghandId)
};
% if inherit:
context.for_non_inherited_property = None;
% else:
context.for_non_inherited_property = Some(longhand_id);
% endif
% if system:
if let Some(sf) = value.get_system() {
longhands::system_font::resolve_system_font(sf, context)
}
% endif
% if boxed:
let value = (**value).to_computed_value(context);
% else:
let value = value.to_computed_value(context);
% endif
% if to_animated:
let value = value.to_animated_value();
% endif
unsafe {
let mut out = mem::MaybeUninit::uninit();
ptr::write(
out.as_mut_ptr() as *mut AnimationValueVariantRepr<${ty}>,
AnimationValueVariantRepr {
tag: longhand_id.to_physical(context.builder.writing_mode) as u16,
value,
},
);
out.assume_init()
}
}
% endfor
PropertyDeclaration::CSSWideKeyword(ref declaration) => {
match declaration.id {
// We put all the animatable properties first in the hopes
// that it might increase match locality.
% for prop in data.longhands:
% if prop.animatable:
LonghandId::${prop.camel_case} => {
// FIXME(emilio, bug 1533327): I think
// CSSWideKeyword::Revert handling is not fine here, but
// what to do instead?
//
// Seems we'd need the computed value as if it was
// revert, somehow. Treating it as `unset` seems fine
// for now...
let style_struct = match declaration.keyword {
% if not prop.style_struct.inherited:
CSSWideKeyword::Revert |
CSSWideKeyword::Unset |
% endif
CSSWideKeyword::Initial => {
initial.get_${prop.style_struct.name_lower}()
},
% if prop.style_struct.inherited:
CSSWideKeyword::Revert |
CSSWideKeyword::Unset |
% endif
CSSWideKeyword::Inherit => {
context.builder
.get_parent_${prop.style_struct.name_lower}()
},
};
let computed = style_struct
% if prop.logical:
.clone_${prop.ident}(context.builder.writing_mode);
% else:
.clone_${prop.ident}();<|fim▁hole|> % endif
% if not prop.is_animatable_with_computed_value:
let computed = computed.to_animated_value();
% endif
% if prop.logical:
let wm = context.builder.writing_mode;
<%helpers:logical_setter_helper name="${prop.name}">
<%def name="inner(physical_ident)">
AnimationValue::${to_camel_case(physical_ident)}(computed)
</%def>
</%helpers:logical_setter_helper>
% else:
AnimationValue::${prop.camel_case}(computed)
% endif
},
% endif
% endfor
% for prop in data.longhands:
% if not prop.animatable:
LonghandId::${prop.camel_case} => return None,
% endif
% endfor
}
},
PropertyDeclaration::WithVariables(ref declaration) => {
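                // Substitute any var() references first, then convert the resulting
                // concrete declaration recursively.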
let substituted = {
let custom_properties =
extra_custom_properties.or_else(|| context.style().custom_properties());
declaration.value.substitute_variables(
declaration.id,
custom_properties,
context.quirks_mode,
context.device(),
)
};
return AnimationValue::from_declaration(
&substituted,
context,
extra_custom_properties,
initial,
)
},
_ => return None // non animatable properties will get included because of shorthands. ignore.
};
Some(animatable)
}
/// Get an AnimationValue for an AnimatableLonghand from a given computed values.
pub fn from_computed_values(
property: LonghandId,
style: &ComputedValues,
) -> Option<Self> {
let property = property.to_physical(style.writing_mode);
Some(match property {
% for prop in data.longhands:
% if prop.animatable and not prop.logical:
LonghandId::${prop.camel_case} => {
let computed = style.clone_${prop.ident}();
AnimationValue::${prop.camel_case}(
% if prop.is_animatable_with_computed_value:
computed
% else:
computed.to_animated_value()
% endif
)
}
% endif
% endfor
_ => return None,
})
}
/// Update `style` with the value of this `AnimationValue`.
///
/// SERVO ONLY: This doesn't properly handle things like updating 'em' units
/// when animated font-size.
pub fn set_in_style_for_servo(&self, style: &mut ComputedValues) {
match self {
% for prop in data.longhands:
% if prop.animatable and not prop.logical:
AnimationValue::${prop.camel_case}(ref value) => {
% if not prop.is_animatable_with_computed_value:
let value: longhands::${prop.ident}::computed_value::T =
ToAnimatedValue::from_animated_value(value.clone());
style.mutate_${prop.style_struct.name_lower}().set_${prop.ident}(value);
% else:
style.mutate_${prop.style_struct.name_lower}().set_${prop.ident}(value.clone());
% endif
}
% else:
AnimationValue::${prop.camel_case}(..) => unreachable!(),
% endif
% endfor
}
}
}
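/// Shared helper for discretely-animated values: jump from `this` to `other` at
/// 50% progress. Only interpolation is defined for discrete values, so every
/// other procedure fails.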
fn animate_discrete<T: Clone>(this: &T, other: &T, procedure: Procedure) -> Result<T, ()> {
if let Procedure::Interpolate { progress } = procedure {
Ok(if progress < 0.5 { this.clone() } else { other.clone() })
} else {
Err(())
}
}
impl Animate for AnimationValue {
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> {
Ok(unsafe {
use self::AnimationValue::*;
let this_tag = *(self as *const _ as *const u16);
let other_tag = *(other as *const _ as *const u16);
if this_tag != other_tag {
panic!("Unexpected AnimationValue::animate call");
}
match *self {
<% keyfunc = lambda x: (x.animated_type(), x.animation_value_type == "discrete") %>
% for (ty, discrete), props in groupby(animated, key=keyfunc):
${" |\n".join("{}(ref this)".format(prop.camel_case) for prop in props)} => {
let other_repr =
&*(other as *const _ as *const AnimationValueVariantRepr<${ty}>);
% if discrete:
let value = animate_discrete(this, &other_repr.value, procedure)?;
% else:
let value = this.animate(&other_repr.value, procedure)?;
% endif
let mut out = mem::MaybeUninit::uninit();
ptr::write(
out.as_mut_ptr() as *mut AnimationValueVariantRepr<${ty}>,
AnimationValueVariantRepr {
tag: this_tag,
value,
},
);
out.assume_init()
}
% endfor
${" |\n".join("{}(void)".format(prop.camel_case) for prop in unanimated)} => {
void::unreachable(void)
}
}
})
}
}
<%
nondiscrete = []
for prop in animated:
if prop.animation_value_type != "discrete":
nondiscrete.append(prop)
%>
impl ComputeSquaredDistance for AnimationValue {
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
unsafe {
use self::AnimationValue::*;
let this_tag = *(self as *const _ as *const u16);
let other_tag = *(other as *const _ as *const u16);
if this_tag != other_tag {
panic!("Unexpected AnimationValue::compute_squared_distance call");
}
match *self {
% for ty, props in groupby(nondiscrete, key=lambda x: x.animated_type()):
${" |\n".join("{}(ref this)".format(prop.camel_case) for prop in props)} => {
let other_repr =
&*(other as *const _ as *const AnimationValueVariantRepr<${ty}>);
this.compute_squared_distance(&other_repr.value)
}
% endfor
_ => Err(()),
}
}
}
}
impl ToAnimatedZero for AnimationValue {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
match *self {
% for prop in data.longhands:
% if prop.animatable and not prop.logical and prop.animation_value_type != "discrete":
AnimationValue::${prop.camel_case}(ref base) => {
Ok(AnimationValue::${prop.camel_case}(base.to_animated_zero()?))
},
% endif
% endfor
_ => Err(()),
}
}
}
/// A trait to abstract away the different kind of animations over a list that
/// there may be.
pub trait ListAnimation<T> : Sized {
/// <https://drafts.csswg.org/css-transitions/#animtype-repeatable-list>
fn animate_repeatable_list(&self, other: &Self, procedure: Procedure) -> Result<Self, ()>
where
T: Animate;
/// <https://drafts.csswg.org/css-transitions/#animtype-repeatable-list>
fn squared_distance_repeatable_list(&self, other: &Self) -> Result<SquaredDistance, ()>
where
T: ComputeSquaredDistance;
/// This is the animation used for some of the types like shadows and
/// filters, where the interpolation happens with the zero value if one of
/// the sides is not present.
fn animate_with_zero(&self, other: &Self, procedure: Procedure) -> Result<Self, ()>
where
T: Animate + Clone + ToAnimatedZero;
/// This is the animation used for some of the types like shadows and
/// filters, where the interpolation happens with the zero value if one of
/// the sides is not present.
fn squared_distance_with_zero(&self, other: &Self) -> Result<SquaredDistance, ()>
where
T: ToAnimatedZero + ComputeSquaredDistance;
}
macro_rules! animated_list_impl {
(<$t:ident> for $ty:ty) => {
impl<$t> ListAnimation<$t> for $ty {
fn animate_repeatable_list(
&self,
other: &Self,
procedure: Procedure,
) -> Result<Self, ()>
where
T: Animate,
{
// If the length of either list is zero, the least common multiple is undefined.
if self.is_empty() || other.is_empty() {
return Err(());
}
use num_integer::lcm;
let len = lcm(self.len(), other.len());
self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(this, other)| {
this.animate(other, procedure)
}).collect()
}
fn squared_distance_repeatable_list(
&self,
other: &Self,
) -> Result<SquaredDistance, ()>
where
T: ComputeSquaredDistance,
{
if self.is_empty() || other.is_empty() {
return Err(());
}
use num_integer::lcm;
let len = lcm(self.len(), other.len());
self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(this, other)| {
this.compute_squared_distance(other)
}).sum()
}
fn animate_with_zero(
&self,
other: &Self,
procedure: Procedure,
) -> Result<Self, ()>
where
T: Animate + Clone + ToAnimatedZero
{
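                // Addition concatenates the two lists; all other procedures pair the
                // items up via zip_longest, padding the shorter side with
                // animated-zero values.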
if procedure == Procedure::Add {
return Ok(
self.iter().chain(other.iter()).cloned().collect()
);
}
self.iter().zip_longest(other.iter()).map(|it| {
match it {
EitherOrBoth::Both(this, other) => {
this.animate(other, procedure)
},
EitherOrBoth::Left(this) => {
this.animate(&this.to_animated_zero()?, procedure)
},
EitherOrBoth::Right(other) => {
other.to_animated_zero()?.animate(other, procedure)
}
}
}).collect()
}
fn squared_distance_with_zero(
&self,
other: &Self,
) -> Result<SquaredDistance, ()>
where
T: ToAnimatedZero + ComputeSquaredDistance
{
self.iter().zip_longest(other.iter()).map(|it| {
match it {
EitherOrBoth::Both(this, other) => {
this.compute_squared_distance(other)
},
EitherOrBoth::Left(list) | EitherOrBoth::Right(list) => {
list.to_animated_zero()?.compute_squared_distance(list)
},
}
}).sum()
}
}
}
}
animated_list_impl!(<T> for crate::OwnedSlice<T>);
animated_list_impl!(<T> for SmallVec<[T; 1]>);
animated_list_impl!(<T> for Vec<T>);
/// <https://drafts.csswg.org/web-animations-1/#animating-visibility>
impl Animate for Visibility {
#[inline]
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> {
match procedure {
Procedure::Interpolate { .. } => {
let (this_weight, other_weight) = procedure.weights();
match (*self, *other) {
(Visibility::Visible, _) => {
Ok(if this_weight > 0.0 { *self } else { *other })
},
(_, Visibility::Visible) => {
Ok(if other_weight > 0.0 { *other } else { *self })
},
_ => Err(()),
}
},
_ => Err(()),
}
}
}
impl ComputeSquaredDistance for Visibility {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt(if *self == *other { 0. } else { 1. }))
}
}
impl ToAnimatedZero for Visibility {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Err(())
}
}
/// <https://drafts.csswg.org/css-transitions/#animtype-rect>
impl Animate for ClipRect {
#[inline]
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> {
use crate::values::computed::LengthOrAuto;
let animate_component = |this: &LengthOrAuto, other: &LengthOrAuto| {
let result = this.animate(other, procedure)?;
if let Procedure::Interpolate { .. } = procedure {
return Ok(result);
}
if result.is_auto() {
// FIXME(emilio): Why? A couple SMIL tests fail without this,
// but it seems extremely fishy.
return Err(());
}
Ok(result)
};
Ok(ClipRect {
top: animate_component(&self.top, &other.top)?,
right: animate_component(&self.right, &other.right)?,
bottom: animate_component(&self.bottom, &other.bottom)?,
left: animate_component(&self.left, &other.left)?,
})
}
}
<%
FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale',
'HueRotate', 'Invert', 'Opacity', 'Saturate',
'Sepia' ]
%>
/// <https://drafts.fxtf.org/filters/#animation-of-filters>
impl Animate for AnimatedFilter {
fn animate(
&self,
other: &Self,
procedure: Procedure,
) -> Result<Self, ()> {
use crate::values::animated::animate_multiplicative_factor;
match (self, other) {
% for func in ['Blur', 'Grayscale', 'HueRotate', 'Invert', 'Sepia']:
(&Filter::${func}(ref this), &Filter::${func}(ref other)) => {
Ok(Filter::${func}(this.animate(other, procedure)?))
},
% endfor
% for func in ['Brightness', 'Contrast', 'Opacity', 'Saturate']:
(&Filter::${func}(this), &Filter::${func}(other)) => {
Ok(Filter::${func}(animate_multiplicative_factor(this, other, procedure)?))
},
% endfor
% if engine == "gecko":
(&Filter::DropShadow(ref this), &Filter::DropShadow(ref other)) => {
Ok(Filter::DropShadow(this.animate(other, procedure)?))
},
% endif
_ => Err(()),
}
}
}
/// <http://dev.w3.org/csswg/css-transforms/#none-transform-animation>
impl ToAnimatedZero for AnimatedFilter {
fn to_animated_zero(&self) -> Result<Self, ()> {
match *self {
% for func in ['Blur', 'Grayscale', 'HueRotate', 'Invert', 'Sepia']:
Filter::${func}(ref this) => Ok(Filter::${func}(this.to_animated_zero()?)),
% endfor
% for func in ['Brightness', 'Contrast', 'Opacity', 'Saturate']:
Filter::${func}(_) => Ok(Filter::${func}(1.)),
% endfor
% if engine == "gecko":
Filter::DropShadow(ref this) => Ok(Filter::DropShadow(this.to_animated_zero()?)),
% endif
_ => Err(()),
}
}
}
/// An iterator over all the properties that transition on a given style.
pub struct TransitionPropertyIterator<'a> {
style: &'a ComputedValues,
index_range: core::ops::Range<usize>,
longhand_iterator: Option<NonCustomPropertyIterator<LonghandId>>,
}
impl<'a> TransitionPropertyIterator<'a> {
/// Create a `TransitionPropertyIterator` for the given style.
pub fn from_style(style: &'a ComputedValues) -> Self {
Self {
style,
index_range: 0..style.get_box().transition_property_count(),
longhand_iterator: None,
}
}
}
/// A single iteration of the TransitionPropertyIterator.
pub struct TransitionPropertyIteration {
/// The id of the longhand for this property.
pub longhand_id: LonghandId,
/// The index of this property in the list of transition properties for this
/// iterator's style.
pub index: usize,
}
impl<'a> Iterator for TransitionPropertyIterator<'a> {
type Item = TransitionPropertyIteration;
fn next(&mut self) -> Option<Self::Item> {
use crate::values::computed::TransitionProperty;
loop {
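            // Drain any longhands expanded from a shorthand entry before advancing
            // to the next transition-property value.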
if let Some(ref mut longhand_iterator) = self.longhand_iterator {
if let Some(longhand_id) = longhand_iterator.next() {
return Some(TransitionPropertyIteration {
longhand_id,
index: self.index_range.start,
});
}
self.longhand_iterator = None;
}
let index = self.index_range.next()?;
match self.style.get_box().transition_property_at(index) {
TransitionProperty::Longhand(longhand_id) => {
return Some(TransitionPropertyIteration {
longhand_id,
index,
})
}
// In the other cases, we set up our state so that we are ready to
// compute the next value of the iterator and then loop (equivalent
// to calling self.next()).
TransitionProperty::Shorthand(ref shorthand_id) =>
self.longhand_iterator = Some(shorthand_id.longhands()),
TransitionProperty::Custom(..) | TransitionProperty::Unsupported(..) => {}
}
}
}
}<|fim▁end|> | |
<|file_name|>view_utils.py<|end_file_name|><|fim▁begin|>"""
Utilities for all views
Ben Adida (12-30-2008)
"""
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import loader
import helios_auth
from helios_auth.security import get_user
##
## BASICS
##
SUCCESS = HttpResponse("SUCCESS")
##
## template abstraction
##
def prepare_vars(request, values):
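  # Start from a copy of the caller's variables and layer the auth defaults on top.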
vars_with_user = values.copy()<|fim▁hole|> vars_with_user['SECURE_URL_HOST'] = settings.SECURE_URL_HOST
vars_with_user['STATIC'] = '/static/auth'
vars_with_user['MEDIA_URL'] = '/static/auth/'
vars_with_user['TEMPLATE_BASE'] = helios_auth.TEMPLATE_BASE
vars_with_user['TEMPLATE_BASENONAV'] = helios_auth.TEMPLATE_BASENONAV
vars_with_user['settings'] = settings
return vars_with_user
def render_template(request, template_name, values=None):
vars_with_user = prepare_vars(request, values or {})
return render_to_response('helios_auth/templates/%s.html' % template_name, vars_with_user)
def render_template_raw(request, template_name, values=None):
t = loader.get_template(template_name + '.html')
values = values or {}
vars_with_user = prepare_vars(request, values)
return t.render(context=vars_with_user, request=request)
def render_json(json_txt):
return HttpResponse(json_txt)<|fim▁end|> |
if request:
vars_with_user['user'] = get_user(request)
vars_with_user['csrf_token'] = request.session['csrf_token'] |
<|file_name|>trace_analyzer_old.py<|end_file_name|><|fim▁begin|>__author__= "barun"
__date__ = "$20 May, 2011 12:25:36 PM$"
from metrics import Metrics
from wireless_fields import *
DATA_PKTS = ('tcp', 'udp', 'ack',)
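# Trace entries whose packet type is not listed here count as control traffic.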
def is_control_pkt(pkt_type=''):
return pkt_type not in DATA_PKTS
class TraceAnalyzer(object):
'''
Trace Analyzer
'''
def __init__(self, file_name=None):
print 'Trace Analyzer'
self._receiveEvents = []
self._sendEvents = []
self._dropEvents = []
self._otherEvents = []
self._data_pkts_rcvd = []
self._cntrl_pkts_rcvd = []
self._sourceNodes = []<|fim▁hole|> self._destinationNodes = []
self.parse_events(file_name)
self.get_statistics()
def parse_events(self, file_name):
'''
Parse the send, receive and drop events, and store them in a list. This
method should get called only once (from inside __init__) at the
beginning of processing.
'''
print 'Parse events -- Use normal record scan to filter receive events'
if file_name:
trace_file = None
try:
trace_file = open(file_name, 'r')
for event in trace_file:
if event[0] == EVENT_RECEIVE:
self._receiveEvents.append(event)
elif event[0] == EVENT_SEND:
self._sendEvents.append(event)
elif event[0] == EVENT_DROP:
self._dropEvents.append(event)
else:
self._otherEvents.append(event)
except IOError, ioe:
print 'IOError:', str(ioe)
finally:
if trace_file:
trace_file.close()
for event in self._receiveEvents:
event = event.split()
try:
if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and\
event[I_TRACE_LEVEL_TOKEN] == S_TRACE_LEVEL_TOKEN and\
event[I_PKT_TYPE] in DATA_PKTS:
self._data_pkts_rcvd.append(event)
else:
self._cntrl_pkts_rcvd.append(event)
except IndexError:
#print event
self._data_pkts_rcvd.append(event)
continue
# Determine sending and receiving nodes
for event in self._sendEvents:
try:
event = event.split()
if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and \
event[I_PKT_TYPE] in DATA_PKTS:
if event[I_SRC_FIELD_TOKEN] == S_SRC_FIELD_TOKEN:
src = event[I_SRC_ADDR_PORT].split('.')[0]
if src not in self._sourceNodes and int(src) >= 0:
self._sourceNodes.append(src)
else:
continue
            # Is it required to have destination nodes?
# In case of TCP, source nodes themselves will become
# destination of acknowledgements
#
# if event[I_PKT_TYPE_TOKEN] == S_PKT_TYPE_TOKEN and \
# event[I_PKT_TYPE] in DATA_PKTS:
# if event[I_DST_FIELD_TOKEN] == S_DST_FIELD_TOKEN:
# dst = event[I_DST_ADDR_PORT].split('.')[0]
# if dst not in self._destinationNodes and int(dst) >= 0:
# self._destinationNodes.append(dst)
# else:
# continue
except IndexError:
# IndexError can occur because certain log entries from MAC
# layer may not have source and destination infos -- don't
# know exactly why
continue
# Compute simulation times
try:
self._simulationStartTime = float(self._sendEvents[0].split()[I_TIMESTAMP])
except IndexError:
self._simulationStartTime = 0
try:
self._simulationEndTime = float(self._sendEvents[len(self._sendEvents)-1].split()[I_TIMESTAMP])
except IndexError:
self._simulationEndTime = 0
self._simulationDuration = self._simulationEndTime - self._simulationStartTime
def get_statistics(self):
msg = '''
Simulation start: %f
Simulation end: %f
Duration: %f
Source nodes: %s
# of packets sent: %d
# of packets received: %d
# of data packets: %d
        # of control packets: %d
        # of packets dropped: %d
# of other events: %d
''' % (
self._simulationStartTime,
self._simulationEndTime,
self._simulationDuration,
self._sourceNodes,
len(self._sendEvents),
len(self._receiveEvents),
len(self._data_pkts_rcvd),
len(self._cntrl_pkts_rcvd),
len(self._dropEvents),
len(self._otherEvents),
)
print msg
def get_average_throughput(self):
Metrics.averageThroughput()
def get_instantaneous_throughput(self):
Metrics.instantaneousThroughput()<|fim▁end|> | |
<|file_name|>unit.rs<|end_file_name|><|fim▁begin|>type Health = f64;
type Attack = f64;
type Level = u8;
type Experience = f64;
pub trait Unit {
fn new(health: f64, attack: f64) -> Self;<|fim▁hole|> fn realize_hp(&self) -> f64;
fn realize_atk(&self) -> f64;
fn change_current_hp(&mut self, amount: f64);
fn show(&self) -> String {
format!("{}hp {}atk",
self.realize_hp() as i32,
self.realize_atk() as i32)
}
}
#[derive(Clone)]
pub struct Hero {
hp: Health,
atk: Attack,
lvl: Level,
exp: Experience,
}
impl Unit for Hero {
fn new(health: f64, attack: f64) -> Hero {
Hero {
hp: health,
atk: attack,
lvl: 1,
exp: 0.0,
}
}
fn realize_hp(&self) -> f64 { self.hp }
fn realize_atk(&self) -> f64 { self.atk }
fn change_current_hp(&mut self, amount: f64) { self.hp += amount; }
}
impl Hero {
pub fn realize_lvl(&self) -> u8 { self.lvl }
pub fn realize_exp(&self) -> f64 { self.exp }
}
#[derive(Clone)]
pub struct NonHero {
hp: Health,
atk: Attack,
}
impl Unit for NonHero {
fn new(health: f64, attack: f64) -> NonHero {
NonHero {
hp: health,
atk: attack,
}
}
fn realize_hp(&self) -> f64 { self.hp }
fn realize_atk(&self) -> f64 { self.atk }
fn change_current_hp(&mut self, amount: f64) { self.hp += amount; }
}<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url
from django.contrib import admin<|fim▁hole|>from dashboard.views import QuestionApi
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'zuobiao.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/question/(?P<pk>\d+)/$', QuestionApi.as_view(), name='question_api'),
)<|fim▁end|> | |
<|file_name|>app.component.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';<|fim▁hole|>@Component({
selector: 'app-root',
template: `<router-outlet></router-outlet>`
})
export class AppComponent {}<|fim▁end|> | |
<|file_name|>MathML.js<|end_file_name|><|fim▁begin|>/* -*- Mode: Javascript; indent-tabs-mode:nil; js-indent-level: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/*************************************************************
*<|fim▁hole|> *
* Copyright (c) 2009-2015 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
MathJax.Localization.addTranslation("vi","MathML",{
version: "2.5.0",
isLoaded: true,
strings: {
BadMglyph: "mglyph h\u1ECFng: %1",
BadMglyphFont: "Ph\u00F4ng ch\u1EEF h\u1ECFng: %1",
MathPlayer: "MathJax kh\u00F4ng th\u1EC3 thi\u1EBFt l\u1EADp MathPlayer.\n\nN\u1EBFu MathPlayer ch\u01B0a \u0111\u01B0\u1EE3c c\u00E0i \u0111\u1EB7t, b\u1EA1n c\u1EA7n ph\u1EA3i c\u00E0i \u0111\u1EB7t n\u00F3 tr\u01B0\u1EDBc ti\u00EAn.\nN\u1EBFu kh\u00F4ng, c\u00E1c t\u00F9y ch\u1ECDn b\u1EA3o m\u1EADt c\u1EE7a b\u1EA1n c\u00F3 th\u1EC3 ng\u0103n tr\u1EDF c\u00E1c \u0111i\u1EC1u khi\u1EC3n ActiveX. H\u00E3y ch\u1ECDn T\u00F9y ch\u1ECDn Internet trong tr\u00ECnh \u0111\u01A1n C\u00F4ng c\u1EE5, qua th\u1EBB B\u1EA3o m\u1EADt, v\u00E0 b\u1EA5m n\u00FAt M\u1EE9c t\u00F9y ch\u1EC9nh. Ki\u1EC3m c\u00E1c h\u1ED9p \u201CCh\u1EA1y \u0111i\u1EC1u khi\u1EC3n ActiveX\u201D v\u00E0 \u201CH\u00E0nh vi nh\u1ECB ph\u00E2n v\u00E0 k\u1ECBch b\u1EA3n\u201D.\n\nHi\u1EC7n t\u1EA1i b\u1EA1n s\u1EBD g\u1EB7p c\u00E1c th\u00F4ng b\u00E1o l\u1ED7i thay v\u00EC to\u00E1n h\u1ECDc \u0111\u01B0\u1EE3c k\u1EBFt xu\u1EA5t.",
CantCreateXMLParser: "MathJax kh\u00F4ng th\u1EC3 t\u1EA1o ra b\u1ED9 ph\u00E2n t\u00EDch XML cho MathML. H\u00E3y ch\u1ECDn T\u00F9y ch\u1ECDn Internet trong tr\u00ECnh \u0111\u01A1n C\u00F4ng c\u1EE5, qua th\u1EBB B\u1EA3o m\u1EADt, v\u00E0 b\u1EA5m n\u00FAt M\u1EE9c t\u00F9y ch\u1EC9nh. Ki\u1EC3m h\u1ED9p \u201CScript c\u00E1c \u0111i\u1EC1u khi\u1EC3n ActiveX \u0111\u01B0\u1EE3c \u0111\u00E1nh d\u1EA5u l\u00E0 an to\u00E0n\u201D.\n\nMathJax s\u1EBD kh\u00F4ng th\u1EC3 x\u1EED l\u00FD c\u00E1c ph\u01B0\u01A1ng tr\u00ECnh MathML.",
UnknownNodeType: "Ki\u1EC3u n\u00FAt kh\u00F4ng r\u00F5: %1",
UnexpectedTextNode: "N\u00FAt v\u0103n b\u1EA3n b\u1EA5t ng\u1EEB: %1",
ErrorParsingMathML: "L\u1ED7i khi ph\u00E2n t\u00EDch MathML",
ParsingError: "L\u1ED7i khi ph\u00E2n t\u00EDch MathML: %1",
MathMLSingleElement: "MathML ph\u1EA3i ch\u1EC9 c\u00F3 m\u1ED9t ph\u1EA7n t\u1EED g\u1ED1c",
MathMLRootElement: "Ph\u1EA7n t\u1EED g\u1ED1c c\u1EE7a MathML ph\u1EA3i l\u00E0 \u003Cmath\u003E, ch\u1EE9 kh\u00F4ng ph\u1EA3i %1"
}
});
MathJax.Ajax.loadComplete("[MathJax]/localization/vi/MathML.js");<|fim▁end|> | * MathJax/localization/vi/MathML.js |
<|file_name|>ios.rs<|end_file_name|><|fim▁begin|>#![cfg(target_os = "ios")]
use crate::platform::ContextTraitExt;
use crate::{Context, ContextCurrentState};
<|fim▁hole|>
use std::os::raw;
impl<T: ContextCurrentState> ContextTraitExt for Context<T> {
type Handle = *mut raw::c_void;
#[inline]
unsafe fn raw_handle(&self) -> Self::Handle {
self.context.raw_handle()
}
#[inline]
unsafe fn get_egl_display(&self) -> Option<*const raw::c_void> {
None
}
}<|fim▁end|> | pub use winit::platform::ios::*; |
<|file_name|>tools.js<|end_file_name|><|fim▁begin|>var jwt = require('jsonwebtoken');
/**
* Middleware
*/
module.exports = function(scullog){
return {
realIp: function* (next) {
this.req.ip = this.headers['x-forwarded-for'] || this.ip;
yield* next;
},
handelError: function* (next) {
try {
yield* next;
} catch (err) {
this.status = err.status || 500;
this.body = err.message;
C.logger.error(err.stack);
this.app.emit('error', err, this);
}
},
loadRealPath: function* (next) {
// router url format must be /api/(.*)
this.request.fPath = scullog.getFileManager().filePath(this.params[0], this.request.query.base);
C.logger.info(this.request.fPath);
yield* next;
},
checkPathExists: function* (next) {
// Must after loadRealPath
if (!(yield scullog.getFileManager().exists(this.request.fPath))) {
this.status = 404;
this.body = 'Path Not Exists!';
}
else {
yield* next;
}
},
checkBase: function* (next){
var base = this.request.query.base;
if (!!!base || scullog.getConfiguration().directory.indexOf(base) == -1) {
this.status = 400;<|fim▁hole|> }
},
checkPathNotExists: function* (next) {
// Must after loadRealPath
if (this.query.type != 'UPLOAD_FILE' && (yield scullog.getFileManager().exists(this.request.fPath))) {
this.status = 400;
this.body = 'Path Has Exists!';
}
else {
yield* next;
}
},
checkAccessCookie: function* (next) {
if (this.request.url.indexOf('/access') == -1) {
var accessJwt = this.cookies.get(scullog.getConfiguration().id);
if (accessJwt) {
try {
var decoded = jwt.verify(accessJwt, scullog.getConfiguration().secret);
} catch (e) {
this.append('access-expired', 'true');
}
} else if (this.request.header["access-role"] != "default") {
this.append('access-expired', 'true');
}
}
yield* next;
}
}
};<|fim▁end|> | this.body = 'Invalid Base Location!';
} else {
yield* next; |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>(function($){
$(function(){
triggerNavigation();
triggerScrollTop();
triggerTooltips();
initMQ();
});
function autoplayVideo(video, autoplay) {
autoplay = autoplay || false;
var $carousel = $('.carousel');
$carousel.on('slide.bs.carousel', function(e) {
video.pause();
});
$(video).on('ended', function (e) {
setTimeout(function () {
$carousel.carousel('next');
$carousel.carousel('cycle');
}, 1000);
});
if (autoplay) {
$(window).on('scroll.video.trigger resize.video.trigger', function () {
if ($(video).isOnScreen(0.8)) {
$(window).off('scroll.video.trigger resize.video.trigger');
video.play();
}
});
}
}
function triggerNavigation() {
var $nav_trigger = $('.js-navi-trigger');
$nav_trigger.on('mouseenter', function (e) {
$('#'+$(this).data('trigger')).addClass('navi-open-icons');
});
$nav_trigger.on('mouseleave', function (e) {
$('#'+$(this).data('trigger')).removeClass('navi-open-icons');
});
$nav = $('.js-nav');
$nav.on('mouseenter', function (e) {
$(this).addClass('navi-open-full');
});
$nav.on('mouseleave', function (e) {
$(this).removeClass('navi-open-full');
});
}
function triggerScrollTop() {
var $up_trigger = $('.js-up-trigger');
$up_trigger.hide().on('click', function (e) {
e.preventDefault();<|fim▁hole|> });
$(window).on('scroll.up.trigger resize.up.trigger', function () {
if ($(this).scrollTop() > $(this).outerHeight() / 2) {
$up_trigger.fadeIn();
} else {
$up_trigger.fadeOut();
}
});
}
function triggerTooltips() {
$('[data-toggle="tooltip"]').tooltip();
}
function initMQ() {
var queries = [
{
context: 'default',
match: function() {
$('img').each(function() {
if (typeof $(this).data('default') == 'undefined') $(this).attr('data-default', $(this).attr('src'));
var small = $(this).data('default');
if (small) $(this).attr('src', small);
});
}
},
{
context: 'medium',
match: function() {
$('img').each(function() {
if (typeof $(this).data('default') == 'undefined') $(this).attr('data-default', $(this).attr('src'));
var medium = $(this).data('medium');
if (medium) $(this).attr('src', medium);
});
}
},
{
context: 'wide',
match: function() {
$('img').each(function() {
if (typeof $(this).data('default') == 'undefined') $(this).attr('data-default', $(this).attr('src'));
var large = $(this).data('large');
if (large) $(this).attr('src', large);
});
}
}
];
MQ.init(queries);
}
$.fn.isOnScreen = function(percentage) {
percentage = percentage || 0;
var win = $(window);
var viewport = {
top : win.scrollTop(),
left : win.scrollLeft()
};
viewport.right = viewport.left + win.width();
viewport.bottom = viewport.top + win.height();
var bounds = this.offset();
var height = this.outerHeight();
var width = this.outerWidth();
bounds.right = bounds.left + height;
bounds.bottom = bounds.top + width;
return (!(
viewport.right < bounds.left + width * percentage ||
viewport.left > bounds.right - width * percentage ||
viewport.bottom < bounds.top + height * percentage ||
viewport.top > bounds.bottom - height * percentage
));
};
})(window.jQuery);<|fim▁end|> | $('html, body').animate({ scrollTop: 0 }, 500); |
<|file_name|>IndexRequestServerTester.java<|end_file_name|><|fim▁begin|>/* $Id$
* $Revision$
* $Date$
* $Author$
*
* The Netarchive Suite - Software to harvest and preserve websites
* Copyright 2004-2012 The Royal Danish Library, the Danish State and
* University Library, the National Library of France and the Austrian
* National Library.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package dk.netarkivet.harvester.indexserver.distribute;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import junit.framework.TestCase;
import dk.netarkivet.common.distribute.ChannelID;
import dk.netarkivet.common.distribute.JMSConnectionFactory;
import dk.netarkivet.common.distribute.JMSConnectionMockupMQ;
import dk.netarkivet.common.distribute.RemoteFile;
import dk.netarkivet.common.distribute.indexserver.RequestType;
import dk.netarkivet.common.exceptions.ArgumentNotValid;
import dk.netarkivet.common.utils.FileUtils;
import dk.netarkivet.harvester.indexserver.FileBasedCache;
import dk.netarkivet.harvester.indexserver.MockupMultiFileBasedCache;
import dk.netarkivet.harvester.indexserver.distribute.IndexRequestMessage;
import dk.netarkivet.harvester.indexserver.distribute.IndexRequestServer;
import dk.netarkivet.testutils.ClassAsserts;
import dk.netarkivet.testutils.GenericMessageListener;
import dk.netarkivet.testutils.preconfigured.*;
public class IndexRequestServerTester extends TestCase {
private static final Set<Long> JOB_SET = new HashSet<Long>(Arrays.asList(
new Long[]{2L, 4L, 8L, 16L, 32L}));
private static final Set<Long> JOB_SET2 = new HashSet<Long>(Arrays.asList(
new Long[]{1L, 3L, 7L, 15L, 31L}));
IndexRequestServer server;
private UseTestRemoteFile ulrf = new UseTestRemoteFile();
private PreventSystemExit pse = new PreventSystemExit();
private PreserveStdStreams pss = new PreserveStdStreams();
private MoveTestFiles mtf = new MoveTestFiles(TestInfo.ORIGINALS_DIR,
TestInfo.WORKING_DIR);
private MockupJMS mjms = new MockupJMS();
private MockupMultiFileBasedCache mmfbc = new MockupMultiFileBasedCache();
ReloadSettings rs = new ReloadSettings();
public void setUp() {
rs.setUp();
ulrf.setUp();
mjms.setUp();
mtf.setUp();
pss.setUp();
pse.setUp();
mmfbc.setUp();
}
public void tearDown() {
if (server != null) {
server.close();
}
mmfbc.tearDown();
pse.tearDown();
pss.tearDown();
mtf.tearDown();
mjms.tearDown();
ulrf.tearDown();
rs.tearDown();
}
/**
* Verify that factory method - does not throw exception - returns non-null
* value.
*/
public void testGetInstance() {
assertNotNull("Factory method should return non-null object",
IndexRequestServer.getInstance());
server = ClassAsserts.assertSingleton(IndexRequestServer.class);
}
/**
* Verify that visit() - throws exception on null message or message that is
* not ok - returns a non-ok message if handler fails with exception or no
* handler registered
*/
public void testVisitFailures() throws InterruptedException {
server = IndexRequestServer.getInstance();
mmfbc.setMode(MockupMultiFileBasedCache.Mode.FAILING);
server.setHandler(RequestType.CDX, mmfbc);
server.start();
try {
server.visit((IndexRequestMessage) null);
fail("Should throw ArgumentNotValid on null");
} catch (ArgumentNotValid e) {
//expected
}
IndexRequestMessage irMsg = new IndexRequestMessage(
RequestType.CDX, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irMsg, "irMsg1");
GenericMessageListener listener = new GenericMessageListener();
JMSConnectionMockupMQ conn
= (JMSConnectionMockupMQ) JMSConnectionFactory.getInstance();
conn.setListener(irMsg.getReplyTo(), listener);
server.visit(irMsg);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertEquals("Should have received reply",
1, listener.messagesReceived.size());
assertTrue("Should be the right type",
listener.messagesReceived.get(0)
instanceof IndexRequestMessage);
IndexRequestMessage msg
= (IndexRequestMessage) listener.messagesReceived.get(0);
assertEquals("Should be the right message",
irMsg.getID(), msg.getID());
assertFalse("Should not be OK", msg.isOk());
irMsg = new IndexRequestMessage(RequestType.DEDUP_CRAWL_LOG, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irMsg, "irMsg2");
server.visit(irMsg);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertEquals("Should have received reply",
2, listener.messagesReceived.size());
assertTrue("Should be the right type",
listener.messagesReceived.get(1)
instanceof IndexRequestMessage);
msg = (IndexRequestMessage) listener.messagesReceived.get(1);
assertEquals("Should be the right message",
irMsg.getID(), msg.getID());
assertFalse("Should not be OK", msg.isOk());
irMsg = new IndexRequestMessage(RequestType.DEDUP_CRAWL_LOG, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irMsg, "irMsg3");
}
/**
* Verify that visit() - extracts correct info from message - calls the
* appropriate handler - encodes the return value appropriately - sends
* message back as reply
*/
public void testVisitNormal() throws IOException, InterruptedException {
for (RequestType t : RequestType.values()) {
subtestVisitNormal(t);
}
}
private void subtestVisitNormal(RequestType t) throws IOException,
InterruptedException {
//Start server and set a handler
mmfbc.tearDown();
mmfbc.setUp();
mmfbc.setMode(MockupMultiFileBasedCache.Mode.REPLYING);
server = IndexRequestServer.getInstance();
server.setHandler(t, mmfbc);
server.start();
//A message to visit with
IndexRequestMessage irm = new IndexRequestMessage(t, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irm, "irm-1");
//Listen for replies
GenericMessageListener listener = new GenericMessageListener();
JMSConnectionMockupMQ conn
= (JMSConnectionMockupMQ) JMSConnectionFactory.getInstance();
ChannelID channelID = irm.getReplyTo();
conn.setListener(channelID, listener);
//Execute visit
server.visit(irm);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertHandlerCalledWithParameter(mmfbc);
//Check reply is sent
assertEquals("Should have received reply",
1, listener.messagesReceived.size());
assertTrue("Should be the right type",
listener.messagesReceived.get(0)
instanceof IndexRequestMessage);
IndexRequestMessage msg
= (IndexRequestMessage) listener.messagesReceived.get(0);
assertEquals("Should be the right message",
irm.getID(), msg.getID());
assertTrue("Should be OK", msg.isOk());
//Check contents of file replied
File extractFile = File.createTempFile("extr", "act",
TestInfo.WORKING_DIR);
assertFalse("Message should not indicate directory",
msg.isIndexIsStoredInDirectory());<|fim▁hole|>
// Order in the JOB_SET and the extract file can't be guaranteed
// So we are comparing between the contents of the two sets, not
// the order, which is dubious in relation to sets anyway.
Set<Long> longFromExtractFile = new HashSet<Long>();
FileInputStream fis = new FileInputStream(extractFile);
try {
for (int i = 0; i < JOB_SET.size(); i++) {
longFromExtractFile.add(Long.valueOf(fis.read()));
}
assertEquals("End of file expected after this",
-1, fis.read());
} catch (IOException e) {
fail("Exception thrown: " + e);
} finally {
if (fis != null) {
fis.close();
}
}
assertTrue(
"JOBSET, and the contents of extractfile should be identical",
longFromExtractFile.containsAll(JOB_SET));
FileUtils.remove(mmfbc.getCacheFile(JOB_SET));
conn.removeListener(channelID, listener);
}
/**
* Verify that a message sent to the index server queue is dispatched to the
* appropriate handler if non-null and ok. Verify that no call is made if
* message is null or not ok.
*/
public void testIndexServerListener() throws InterruptedException {
//Start server and set a handler
server = IndexRequestServer.getInstance();
server.setHandler(RequestType.CDX, mmfbc);
server.start();
Thread.sleep(200); // necessary for the unittest to pass
//Send OK message
IndexRequestMessage irm = new IndexRequestMessage(RequestType.CDX,
JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irm, "ID-0");
JMSConnectionMockupMQ conn
= (JMSConnectionMockupMQ) JMSConnectionFactory.getInstance();
conn.send(irm);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertHandlerCalledWithParameter(mmfbc);
//Send not-OK message
irm = new IndexRequestMessage(RequestType.CDX, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irm, "ID-1");
irm.setNotOk("Not OK");
conn.send(irm);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
//Check handler is NOT called
assertEquals("Should NOT have called handler again", 1,
mmfbc.cacheCalled);
}
/**
* Verify that - setHandler() throws exception on null values - calling
* setHandler twice on same type replaces first handler
*/
public void testSetHandler() throws InterruptedException {
server = IndexRequestServer.getInstance();
try {
server.setHandler(RequestType.CDX, (FileBasedCache<Set<Long>>)null);
fail("should have thrown exception on null value.");
} catch (ArgumentNotValid e) {
//expected
}
server = IndexRequestServer.getInstance();
try {
server.setHandler(null, mmfbc);
fail("should have thrown exception on null value.");
} catch (ArgumentNotValid e) {
//expected
}
//Start server and set a handler
server = IndexRequestServer.getInstance();
server.setHandler(RequestType.CDX, mmfbc);
//A message to visit with
IndexRequestMessage irm = new IndexRequestMessage(RequestType.CDX,
JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irm, "dummyID");
//Execute visit
server.visit(irm);
JMSConnectionMockupMQ conn
= (JMSConnectionMockupMQ) JMSConnectionFactory.getInstance();
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertHandlerCalledWithParameter(mmfbc);
//Set new handler
MockupMultiFileBasedCache mjic2 = new MockupMultiFileBasedCache();
mjic2.setUp();
server.setHandler(RequestType.CDX, mjic2);
//Execute new visit
irm = new IndexRequestMessage(RequestType.CDX, JOB_SET, null);
JMSConnectionMockupMQ.updateMsgID(irm, "dummyID");
server.visit(irm);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
//Check the first handler is not called again
assertEquals("Handler should NOT be called", 1,
mmfbc.cacheCalled);
assertHandlerCalledWithParameter(mjic2);
mjic2.tearDown();
}
public void testUnblocking() throws InterruptedException {
mmfbc.setMode(MockupMultiFileBasedCache.Mode.WAITING);
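        //WAITING mode makes handler calls block, so the two overlapping requests
        //below can only both complete by waking each other up.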
server = IndexRequestServer.getInstance();
server.setHandler(RequestType.CDX, mmfbc);
server.start();
Thread.sleep(200); // necessary for the unittest to pass
//A message to visit with
IndexRequestMessage irm = new IndexRequestMessage(RequestType.CDX,
JOB_SET, null);
//Another message to visit with
IndexRequestMessage irm2 = new IndexRequestMessage(RequestType.CDX,
JOB_SET2, null);
//Listen for replies
GenericMessageListener listener = new GenericMessageListener();
JMSConnectionMockupMQ conn
= (JMSConnectionMockupMQ) JMSConnectionFactory.getInstance();
conn.setListener(irm.getReplyTo(), listener);
//Send both messages
conn.send(irm);
conn.send(irm2);
conn.waitForConcurrentTasksToFinish();
//Give a little time to reply
Thread.sleep(200);
conn.waitForConcurrentTasksToFinish();
assertEquals("Should have replies from both messages",
2, listener.messagesReceived.size());
//Now, we test that the threads have actually run simultaneously, and
        //have woken each other; not just timed out.
assertTrue("Threads should have been woken up", mmfbc.woken);
}
private void assertHandlerCalledWithParameter(
MockupMultiFileBasedCache mjic) {
//Check the handler is called
assertEquals("Handler should be called", 1, mjic.cacheCalled);
assertEquals("Handler should be called with right parameter",
JOB_SET, mjic.cacheParameter);
}
}<|fim▁end|> | RemoteFile resultFile = msg.getResultFile();
resultFile.copyTo(extractFile); |
<|file_name|>FZH04.py<|end_file_name|><|fim▁begin|>import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
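# First-crossing distribution of the linear barrier B(S) = B0 + B1*S; expanding
# exp(-B(S)^2/(2S)) gives the three exponential terms below (FZH04 bubble model).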
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def dlnBFdlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):<|fim▁hole|>def RcovEul(del0,z):
return RphysoR0(del0,z)*(1+z)
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
    Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; conversion from Lagrangian to comoving Eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
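        # The S0 and del0 arguments are recomputed from lnR0 right away; they are
        # kept only so existing call sites keep working.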
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))<|fim▁end|> | return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th)) |
<|file_name|>ng-file-upload-shim.js<|end_file_name|><|fim▁begin|>/**!
* AngularJS file upload/drop directive and service with progress and abort
* FileAPI Flash shim for old browsers not supporting FormData
* @author Danial <[email protected]>
* @version 7.0.5
*/
(function () {
/** @namespace FileAPI.noContentTimeout */
function patchXHR(fnName, newFn) {
window.XMLHttpRequest.prototype[fnName] = newFn(window.XMLHttpRequest.prototype[fnName]);
}
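// patchXHR swaps in a wrapper while handing it the original method; a
// minimal (hypothetical) use would be:
//   patchXHR('open', function (orig) {
//     return function () { /* pre-work */ return orig.apply(this, arguments); };
//   });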
function redefineProp(xhr, prop, fn) {
try {
Object.defineProperty(xhr, prop, {get: fn});
} catch (e) {/*ignore*/
}
}
if (!window.FileAPI) {
window.FileAPI = {};
}
FileAPI.shouldLoad = (window.XMLHttpRequest && !window.FormData) || FileAPI.forceLoad;
if (FileAPI.shouldLoad) {
var initializeUploadListener = function (xhr) {
if (!xhr.__listeners) {
if (!xhr.upload) xhr.upload = {};
xhr.__listeners = [];
var origAddEventListener = xhr.upload.addEventListener;
xhr.upload.addEventListener = function (t, fn) {
xhr.__listeners[t] = fn;
if (origAddEventListener) origAddEventListener.apply(this, arguments);
};
}
};
patchXHR('open', function (orig) {
return function (m, url, b) {
initializeUploadListener(this);
this.__url = url;
try {
orig.apply(this, [m, url, b]);
} catch (e) {
if (e.message.indexOf('Access is denied') > -1) {
this.__origError = e;
orig.apply(this, [m, '_fix_for_ie_crossdomain__', b]);
}
}
};
});
patchXHR('getResponseHeader', function (orig) {
return function (h) {
return this.__fileApiXHR && this.__fileApiXHR.getResponseHeader ? this.__fileApiXHR.getResponseHeader(h) : (orig == null ? null : orig.apply(this, [h]));
};
});
patchXHR('getAllResponseHeaders', function (orig) {
return function () {
return this.__fileApiXHR && this.__fileApiXHR.getAllResponseHeaders ? this.__fileApiXHR.getAllResponseHeaders() : (orig == null ? null : orig.apply(this));
};
});
patchXHR('abort', function (orig) {
return function () {
return this.__fileApiXHR && this.__fileApiXHR.abort ? this.__fileApiXHR.abort() : (orig == null ? null : orig.apply(this));
};
});
patchXHR('setRequestHeader', function (orig) {
return function (header, value) {
if (header === '__setXHR_') {
initializeUploadListener(this);
var val = value(this);
// fix for angular < 1.2.0
if (val instanceof Function) {
val(this);
}
} else {
this.__requestHeaders = this.__requestHeaders || {};
this.__requestHeaders[header] = value;
orig.apply(this, arguments);
}
};
});
patchXHR('send', function (orig) {
return function () {
var xhr = this;
if (arguments[0] && arguments[0].__isFileAPIShim) {
var formData = arguments[0];
var config = {
url: xhr.__url,
jsonp: false, //removes the callback form param
cache: true, //removes the ?fileapiXXX in the url
complete: function (err, fileApiXHR) {
xhr.__completed = true;
if (!err && xhr.__listeners.load)
xhr.__listeners.load({
type: 'load',
loaded: xhr.__loaded,
total: xhr.__total,
target: xhr,
lengthComputable: true
});
if (!err && xhr.__listeners.loadend)
xhr.__listeners.loadend({
type: 'loadend',
loaded: xhr.__loaded,
total: xhr.__total,
target: xhr,
lengthComputable: true
});
if (err === 'abort' && xhr.__listeners.abort)
xhr.__listeners.abort({
type: 'abort',
loaded: xhr.__loaded,
total: xhr.__total,
target: xhr,
lengthComputable: true
});
if (fileApiXHR.status !== undefined) redefineProp(xhr, 'status', function () {
return (fileApiXHR.status === 0 && err && err !== 'abort') ? 500 : fileApiXHR.status;
});
if (fileApiXHR.statusText !== undefined) redefineProp(xhr, 'statusText', function () {
return fileApiXHR.statusText;
});
redefineProp(xhr, 'readyState', function () {
return 4;
});
if (fileApiXHR.response !== undefined) redefineProp(xhr, 'response', function () {
return fileApiXHR.response;
});
var resp = fileApiXHR.responseText || (err && fileApiXHR.status === 0 && err !== 'abort' ? err : undefined);
redefineProp(xhr, 'responseText', function () {
return resp;
});
redefineProp(xhr, 'response', function () {
return resp;
});
if (err) redefineProp(xhr, 'err', function () {
return err;
});
xhr.__fileApiXHR = fileApiXHR;
if (xhr.onreadystatechange) xhr.onreadystatechange();
if (xhr.onload) xhr.onload();
},
progress: function (e) {
e.target = xhr;
if (xhr.__listeners.progress) xhr.__listeners.progress(e);
xhr.__total = e.total;
xhr.__loaded = e.loaded;
if (e.total === e.loaded) {
// fix flash issue that doesn't call complete if there is no response text from the server
var _this = this;
setTimeout(function () {
if (!xhr.__completed) {
xhr.getAllResponseHeaders = function () {
};
_this.complete(null, {status: 204, statusText: 'No Content'});
}
}, FileAPI.noContentTimeout || 10000);
}
},
headers: xhr.__requestHeaders
};
config.data = {};
config.files = {};
for (var i = 0; i < formData.data.length; i++) {
var item = formData.data[i];
if (item.val != null && item.val.name != null && item.val.size != null && item.val.type != null) {
config.files[item.key] = item.val;
} else {
config.data[item.key] = item.val;
}
}
setTimeout(function () {
if (!FileAPI.hasFlash) {
throw 'Adobe Flash Player needs to be installed. To check ahead use "FileAPI.hasFlash"';
}
xhr.__fileApiXHR = FileAPI.upload(config);
}, 1);
} else {
if (this.__origError) {
throw this.__origError;
}
orig.apply(xhr, arguments);
}
};
});
window.XMLHttpRequest.__isFileAPIShim = true;
window.FormData = FormData = function () {
return {
append: function (key, val, name) {
if (val.__isFileAPIBlobShim) {
val = val.data[0];
}
this.data.push({
key: key,
val: val,
name: name
});
},
data: [],
__isFileAPIShim: true
};
};
window.Blob = Blob = function (b) {
return {
data: b,
__isFileAPIBlobShim: true
};
};
}
})();
(function () {
/** @namespace FileAPI.forceLoad */
/** @namespace window.FileAPI.jsUrl */
/** @namespace window.FileAPI.jsPath */
function isInputTypeFile(elem) {
return elem[0].tagName.toLowerCase() === 'input' && elem.attr('type') && elem.attr('type').toLowerCase() === 'file';
}
function hasFlash() {
try {
var fo = new ActiveXObject('ShockwaveFlash.ShockwaveFlash');
if (fo) return true;
} catch (e) {
if (navigator.mimeTypes['application/x-shockwave-flash'] !== undefined) return true;
}
return false;
}
function getOffset(obj) {
var left = 0, top = 0;
if (window.jQuery) {
return jQuery(obj).offset();
}
if (obj.offsetParent) {
do {
left += (obj.offsetLeft - obj.scrollLeft);
top += (obj.offsetTop - obj.scrollTop);
obj = obj.offsetParent;
} while (obj);
}
return {
left: left,
top: top
};
}
if (FileAPI.shouldLoad) {
//load FileAPI
if (FileAPI.forceLoad) {
FileAPI.html5 = false;
}
if (!FileAPI.upload) {
var jsUrl, basePath, script = document.createElement('script'), allScripts = document.getElementsByTagName('script'), i, index, src;
if (window.FileAPI.jsUrl) {
jsUrl = window.FileAPI.jsUrl;
} else if (window.FileAPI.jsPath) {
basePath = window.FileAPI.jsPath;
} else {
for (i = 0; i < allScripts.length; i++) {
src = allScripts[i].src;
index = src.search(/\/ng\-file\-upload[\-a-zA-Z0-9\.]*\.js/);
if (index > -1) {
basePath = src.substring(0, index + 1);
break;
}
}
}
if (FileAPI.staticPath == null) FileAPI.staticPath = basePath;
script.setAttribute('src', jsUrl || basePath + 'FileAPI.min.js');
document.getElementsByTagName('head')[0].appendChild(script);
FileAPI.hasFlash = hasFlash();
}
FileAPI.ngfFixIE = function (elem, fileElem, changeFn) {
if (!hasFlash()) {
throw 'Adobe Flash Player needs to be installed. To check ahead use "FileAPI.hasFlash"';
}
var fixInputStyle = function () {
if (elem.attr('disabled')) {
if (fileElem) fileElem.removeClass('js-fileapi-wrapper');
} else {
if (!fileElem.attr('__ngf_flash_')) {
fileElem.unbind('change');
fileElem.unbind('click');
fileElem.bind('change', function (evt) {
fileApiChangeFn.apply(this, [evt]);
changeFn.apply(this, [evt]);
});
fileElem.attr('__ngf_flash_', 'true');
}
fileElem.addClass('js-fileapi-wrapper');
if (!isInputTypeFile(elem)) {
fileElem.css('position', 'absolute')
.css('top', getOffset(elem[0]).top + 'px').css('left', getOffset(elem[0]).left + 'px')
.css('width', elem[0].offsetWidth + 'px').css('height', elem[0].offsetHeight + 'px')
.css('filter', 'alpha(opacity=0)').css('display', elem.css('display'))
.css('overflow', 'hidden').css('z-index', '900000')
.css('visibility', 'visible');
}
}
};
elem.bind('mouseenter', fixInputStyle);
var fileApiChangeFn = function (evt) {
var files = FileAPI.getFiles(evt);
//just a double check for #233
for (var i = 0; i < files.length; i++) {
if (files[i].size === undefined) files[i].size = 0;
if (files[i].name === undefined) files[i].name = 'file';
if (files[i].type === undefined) files[i].type = 'undefined';
}
if (!evt.target) {
evt.target = {};
}
evt.target.files = files;
// if evt.target.files is not writable use helper field
if (evt.target.files !== files) {
evt.__files_ = files;
}
(evt.__files_ || evt.target.files).item = function (i) {
return (evt.__files_ || evt.target.files)[i] || null;
};
};
};
FileAPI.disableFileInput = function (elem, disable) {
if (disable) {
elem.removeClass('js-fileapi-wrapper');
} else {
elem.addClass('js-fileapi-wrapper');
}
};
}
})();
if (!window.FileReader) {
window.FileReader = function () {
var _this = this, loadStarted = false;
this.listeners = {};
this.addEventListener = function (type, fn) {
_this.listeners[type] = _this.listeners[type] || [];
_this.listeners[type].push(fn);
};
this.removeEventListener = function (type, fn) {
if (_this.listeners[type]) _this.listeners[type].splice(_this.listeners[type].indexOf(fn), 1);
};
this.dispatchEvent = function (evt) {
var list = _this.listeners[evt.type];
if (list) {
for (var i = 0; i < list.length; i++) {
list[i].call(_this, evt);
}
}
};
this.onabort = this.onerror = this.onload = this.onloadstart = this.onloadend = this.onprogress = null;
var constructEvent = function (type, evt) {
var e = {type: type, target: _this, loaded: evt.loaded, total: evt.total, error: evt.error};
if (evt.result != null) e.target.result = evt.result;
return e;
};
var listener = function (evt) {
if (!loadStarted) {
loadStarted = true;
if (_this.onloadstart) _this.onloadstart(constructEvent('loadstart', evt));
}
var e;
if (evt.type === 'load') {
if (_this.onloadend) _this.onloadend(constructEvent('loadend', evt));
e = constructEvent('load', evt);
if (_this.onload) _this.onload(e);
_this.dispatchEvent(e);
} else if (evt.type === 'progress') {
e = constructEvent('progress', evt);
if (_this.onprogress) _this.onprogress(e);
_this.dispatchEvent(e);
} else {
e = constructEvent('error', evt);
if (_this.onerror) _this.onerror(e);<|fim▁hole|> this.readAsArrayBuffer = function (file) {
FileAPI.readAsBinaryString(file, listener);
};
this.readAsBinaryString = function (file) {
FileAPI.readAsBinaryString(file, listener);
};
this.readAsDataURL = function (file) {
FileAPI.readAsDataURL(file, listener);
};
this.readAsText = function (file) {
FileAPI.readAsText(file, listener);
};
};
}<|fim▁end|> | _this.dispatchEvent(e);
}
}; |
<|file_name|>test_text.py<|end_file_name|><|fim▁begin|># This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]><|fim▁hole|># Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.exporter.test_text
================================
Test definitions for plainbox.impl.exporter.text module
"""
from io import BytesIO
from unittest import TestCase
from plainbox.impl.exporter.text import TextSessionStateExporter
class TextSessionStateExporterTests(TestCase):
def test_default_dump(self):
exporter = TextSessionStateExporter()
# Text exporter expects this data format
data = {'result_map': {'job_name': {'outcome': 'fail'}}}
stream = BytesIO()
exporter.dump(data, stream)
expected_bytes = "job_name: fail\n".encode('UTF-8')
self.assertEqual(stream.getvalue(), expected_bytes)<|fim▁end|> | # Daniel Manrique <[email protected]>
# |
<|file_name|>node_api_CreateStoragePool.go<|end_file_name|><|fim▁begin|>package node
import (
"encoding/json"
"fmt"
"net/http"
"github.com/gorilla/mux"
"github.com/zero-os/0-core/client/go-client"
"github.com/zero-os/0-orchestrator/api/tools"
)
// CreateStoragePool is the handler for POST /nodes/{nodeid}/storagepools
// Create a new storage pool
func (api *NodeAPI) CreateStoragePool(w http.ResponseWriter, r *http.Request) {
aysClient, err := tools.GetAysConnection(api)
if err != nil {
tools.WriteError(w, http.StatusUnauthorized, err, "")
return
}
var reqBody StoragePoolCreate
node := mux.Vars(r)["nodeid"]
// decode request
if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil {
errmsg := "Error decoding request for storagepool creation"
tools.WriteError(w, http.StatusBadRequest, err, errmsg)
return
}
// validate request
if err := reqBody.Validate(); err != nil {
tools.WriteError(w, http.StatusBadRequest, err, "")
return
}
devices, err := api.GetNodeDevices(w, r)
if err != nil {
tools.WriteError(w, http.StatusInternalServerError, err, "Failed to get Node device")
return
}
type partitionMap struct {
Device string `yaml:"device" json:"device"`
PartUUID string `yaml:"partUUID" json:"partUUID"`
}
bpContent := struct {
DataProfile EnumStoragePoolCreateDataProfile `yaml:"dataProfile" json:"dataProfile"`
Devices []partitionMap `yaml:"devices" json:"devices"`
MetadataProfile EnumStoragePoolCreateMetadataProfile `yaml:"metadataProfile" json:"metadataProfile"`
Node string `yaml:"node" json:"node"`
}{
DataProfile: reqBody.DataProfile,
MetadataProfile: reqBody.MetadataProfile,
Node: node,
}
for _, device := range reqBody.Devices {
_, ok := devices[device]
if !ok {
err := fmt.Errorf("Device %v doesn't exist", device)
tools.WriteError(w, http.StatusBadRequest, err, "")
return
}
bpContent.Devices = append(bpContent.Devices, partitionMap{Device: device})
}
blueprint := map[string]interface{}{
fmt.Sprintf("storagepool__%s", reqBody.Name): bpContent,
"actions": []tools.ActionBlock{{
Action: "install",
Actor: "storagepool",
Service: reqBody.Name}},
}
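// For illustration only: for a hypothetical pool "pool1" the blueprint
// above would serialize roughly as
//   {"storagepool__pool1": {"dataProfile": ..., "metadataProfile": ...,
//    "node": "<nodeid>", "devices": [{"device": "/dev/sda", "partUUID": ""}]},
//    "actions": [{"action": "install", "actor": "storagepool", "service": "pool1"}]}
// (field names follow the yaml/json tags above; the "actions" keys assume
// tools.ActionBlock uses matching lower-case tags).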
run, err := aysClient.ExecuteBlueprint(api.AysRepo, "storagepool", reqBody.Name, "install", blueprint)
errmsg := "Error executing blueprint for storagepool creation "
if !tools.HandleExecuteBlueprintResponse(err, w, errmsg) {
return
}
if _, err := tools.WaitOnRun(api, w, r, run.Key); err != nil {
return
}
w.Header().Set("Location", fmt.Sprintf("/nodes/%s/storagepools/%s", node, reqBody.Name))
w.WriteHeader(http.StatusCreated)
}<|fim▁hole|>func (api *NodeAPI) GetNodeDevices(w http.ResponseWriter, r *http.Request) (map[string]struct{}, error) {
cl, err := tools.GetConnection(r, api)
if err != nil {
return nil, err
}
diskClient := client.Disk(cl)
disks, err := diskClient.List()
if err != nil {
return nil, err
}
devices := make(map[string]struct{})
for _, dev := range disks.BlockDevices {
devices[fmt.Sprintf("/dev/%v", dev.Kname)] = struct{}{}
}
return devices, nil
}<|fim▁end|> | |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>module.exports = function (ctx, next) {<|fim▁hole|><|fim▁end|> | if (ctx.method === 'GET' && ctx.path === '/ping') {
ctx.body = 'pong'
}
} |
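// A minimal usage sketch, assuming a Koa app (the (ctx, next) middleware
// signature matches Koa):
//   const Koa = require('koa');
//   const app = new Koa();
//   app.use(require('./index'));
//   app.listen(3000); // GET /ping -> "pong"
// Other requests are left to Koa's default 404, since next() is never called.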
<|file_name|>cluster_configurer.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.versions.base_cluster_configurer as bcc
<|fim▁hole|> def get_hadoop_conf_dir(self):
return '/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop'
def is_node_awareness_enabled(self):
return False<|fim▁end|> |
class ClusterConfigurer(bcc.BaseClusterConfigurer):
|
<|file_name|>errors.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::WhichLine::*;
use std::io::{BufferedReader, File};
pub struct ExpectedError {
pub line: uint,
pub kind: String,
pub msg: String,
}
#[derive(PartialEq, Show)]
enum WhichLine { ThisLine, FollowPrevious(uint), AdjustBackward(uint) }
/// Looks for either "//~| KIND MESSAGE" or "//~^^... KIND MESSAGE"
/// The former is a "follow" that inherits its target from the preceding line;
/// the latter is an "adjusts" that goes that many lines up.
///
/// Goal is to enable tests both like: //~^^^ ERROR go up three
/// and also //~^ ERROR message one for the preceding line, and
/// //~| ERROR message two for that same line.
// Load any test directives embedded in the file
pub fn load_errors(testfile: &Path) -> Vec<ExpectedError> {
let mut rdr = BufferedReader::new(File::open(testfile).unwrap());
// `last_nonfollow_error` tracks the most recently seen
// line with an error template that did not use the
// follow-syntax, "//~| ...".
//
// (pnkfelix could not find an easy way to compose Iterator::scan
// and Iterator::filter_map to pass along this information into
// `parse_expected`. So instead I am storing that state here and
// updating it in the map callback below.)
let mut last_nonfollow_error = None;
rdr.lines().enumerate().filter_map(|(line_no, ln)| {
parse_expected(last_nonfollow_error,
line_no + 1,
ln.unwrap().as_slice())
.map(|(which, error)| {
match which {
FollowPrevious(_) => {}
_ => last_nonfollow_error = Some(error.line),
}
error
})
}).collect()
}
fn parse_expected(last_nonfollow_error: Option<uint>,
line_num: uint,
line: &str) -> Option<(WhichLine, ExpectedError)> {
let start = match line.find_str("//~") { Some(i) => i, None => return None };
let (follow, adjusts) = if line.char_at(start + 3) == '|' {
(true, 0)<|fim▁hole|> let letters = line[kind_start..].chars();
let kind = letters.skip_while(|c| c.is_whitespace())
.take_while(|c| !c.is_whitespace())
.map(|c| c.to_lowercase())
.collect::<String>();
let letters = line[kind_start..].chars();
let msg = letters.skip_while(|c| c.is_whitespace())
.skip_while(|c| !c.is_whitespace())
.collect::<String>().trim().to_string();
let (which, line) = if follow {
assert!(adjusts == 0, "use either //~| or //~^, not both.");
let line = last_nonfollow_error.unwrap_or_else(|| {
panic!("encountered //~| without preceding //~^ line.")
});
(FollowPrevious(line), line)
} else {
let which =
if adjusts > 0 { AdjustBackward(adjusts) } else { ThisLine };
let line = line_num - adjusts;
(which, line)
};
debug!("line={} which={:?} kind={:?} msg={:?}", line_num, which, kind, msg);
Some((which, ExpectedError { line: line,
kind: kind,
msg: msg, }))
}<|fim▁end|> | } else {
(false, line[start + 3..].chars().take_while(|c| *c == '^').count())
};
let kind_start = start + 3 + adjusts + (follow as usize); |
<|file_name|>signature.go<|end_file_name|><|fim▁begin|>// Note: Consider the API unstable until the code supports at least three different image formats or transports.
// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
package signature
import (
"encoding/json"
"fmt"
"time"
"github.com/containers/image/v5/version"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
const (
signatureType = "atomic container signature"
)
// InvalidSignatureError is returned when parsing an invalid signature.
type InvalidSignatureError struct {
msg string
}
func (err InvalidSignatureError) Error() string {
return err.msg
}
// Signature is a parsed content of a signature.
// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
type Signature struct {
DockerManifestDigest digest.Digest
DockerReference string // FIXME: more precise type?
}
// untrustedSignature is a parsed content of a signature.
type untrustedSignature struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
UntrustedTimestamp *int64
}
// UntrustedSignatureInformation is information available in an untrusted signature.
// This may be useful when debugging signature verification failures,
// or when managing a set of signatures on a single image.
//
// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
type UntrustedSignatureInformation struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
UntrustedTimestamp *time.Time
UntrustedShortKeyIdentifier string
}
// newUntrustedSignature returns an untrustedSignature object with
// the specified primary contents and appropriate metadata.
func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
// Use intermediate variables for these values so that we can take their addresses.
// Golang guarantees that they will have a new address on every execution.
creatorID := "atomic " + version.Version
timestamp := time.Now().Unix()
return untrustedSignature{
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: ×tamp,
}
}
// Compile-time check that untrustedSignature implements json.Marshaler
var _ json.Marshaler = (*untrustedSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
}
optional := map[string]interface{}{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
}
signature := map[string]interface{}{
"critical": critical,
"optional": optional,
}
return json.Marshal(signature)
}
// Compile-time check that untrustedSignature implements json.Unmarshaler
var _ json.Unmarshaler = (*untrustedSignature)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
if formatErr, ok := err.(jsonFormatError); ok {
err = InvalidSignatureError{msg: formatErr.Error()}
}
}
return err
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"critical": &critical,
"optional": &optional,
}); err != nil {
return err
}
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
switch key {
case "creator":
gotCreatorID = true
return &creatorID
case "timestamp":
gotTimestamp = true
return ×tamp
default:
var ignore interface{}
return &ignore
}
}); err != nil {
return err
}
if gotCreatorID {
s.UntrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
}
s.UntrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
"type": &t,
"image": &image,
"identity": &identity,
}); err != nil {
return err
}
if t != signatureType {
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
}
var digestString string
if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
})
}
// Sign formats the signature and returns a blob signed using mech and keyIdentity
// (If it seems surprising that this is a method on untrustedSignature, note that there
// isn’t a good reason to think that a key used by the user is trusted by any component
// of the system just because it is a private key — actually the presence of a private key
// on the system increases the likelihood of an a successful attack on that private key
// on that particular system.)
func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
json, err := json.Marshal(s)
if err != nil {
return nil, err
}
if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
}
if passphrase != "" {
return nil, errors.New("signing mechanism does not support passphrases")
}
return mech.Sign(json, keyIdentity)
}
// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
// because the functions have the same or similar types, so there is a risk of exchanging the functions;
// named members of this struct are more explicit.
type signatureAcceptanceRules struct {
validateKeyIdentity func(string) error
validateSignedDockerReference func(string) error
validateSignedDockerManifestDigest func(digest.Digest) error
}
// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
// match expected values, both as specified by rules, and returns it
func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
signed, keyIdentity, err := mech.Verify(unverifiedSignature)
if err != nil {
return nil, err
}
if err := rules.validateKeyIdentity(keyIdentity); err != nil {
return nil, err
}
var unmatchedSignature untrustedSignature
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, InvalidSignatureError{msg: err.Error()}<|fim▁hole|> }
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
return nil, err
}
// signatureAcceptanceRules have accepted this value.
return &Signature{
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
DockerReference: unmatchedSignature.UntrustedDockerReference,
}, nil
}
// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
// WITHOUT doing any cryptographic verification.
// This may be useful when debugging signature verification failures,
// or when managing a set of signatures on a single image.
//
// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
// NOTE: This should eventually do format autodetection.
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
if err != nil {
return nil, err
}
defer mech.Close()
untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
if err != nil {
return nil, err
}
var untrustedDecodedContents untrustedSignature
if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
return nil, InvalidSignatureError{msg: err.Error()}
}
var timestamp *time.Time // = nil
if untrustedDecodedContents.UntrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
timestamp = &ts
}
return &UntrustedSignatureInformation{
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
UntrustedTimestamp: timestamp,
UntrustedShortKeyIdentifier: shortKeyIdentifier,
}, nil
}<|fim▁end|> | }
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
return nil, err |
<|file_name|>partitioning.py<|end_file_name|><|fim▁begin|># partitioning.py
# Disk partitioning functions.
#
# Copyright (C) 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <[email protected]>
#
from operator import gt, lt
from decimal import Decimal
import functools
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
import parted
from .errors import DeviceError, PartitioningError
from .flags import flags
from .devices import Device, PartitionDevice, LUKSDevice, devicePathToName
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
import logging
log = logging.getLogger("blivet")
def partitionCompare(part1, part2):
""" More specifically defined partitions come first.
< 0 => x < y
0 => x == y
> 0 => x > y
:param part1: the first partition
:type part1: :class:`devices.PartitionDevice`
:param part2: the other partition
:type part2: :class:`devices.PartitionDevice`
:return: see above
:rtype: int
"""
ret = 0
# start sector overrides all other sorting factors
part1_start = part1.req_start_sector
part2_start = part2.req_start_sector
if part1_start is not None and part2_start is None:
return -1
elif part1_start is None and part2_start is not None:
return 1
elif part1_start is not None and part2_start is not None:
return compare(part1_start, part2_start)
if part1.req_base_weight:
ret -= part1.req_base_weight
if part2.req_base_weight:
ret += part2.req_base_weight
# more specific disk specs to the front of the list
# req_disks being empty is equivalent to it being an infinitely long list
if part1.req_disks and not part2.req_disks:
ret -= 500
elif not part1.req_disks and part2.req_disks:
ret += 500
else:
ret += compare(len(part1.req_disks), len(part2.req_disks)) * 500
# primary-only to the front of the list
ret -= compare(part1.req_primary, part2.req_primary) * 200
# fixed size requests to the front
ret += compare(part1.req_grow, part2.req_grow) * 100
# larger requests go to the front of the list
ret -= compare(part1.req_base_size, part2.req_base_size) * 50
# potentially larger growable requests go to the front
if part1.req_grow and part2.req_grow:
if not part1.req_max_size and part2.req_max_size:
ret -= 25
elif part1.req_max_size and not part2.req_max_size:
ret += 25
else:
ret -= compare(part1.req_max_size, part2.req_max_size) * 25
# give a little bump based on mountpoint
if hasattr(part1.format, "mountpoint") and \
hasattr(part2.format, "mountpoint"):
ret += compare(part1.format.mountpoint, part2.format.mountpoint) * 10
if ret > 0:
ret = 1
elif ret < 0:
ret = -1
return ret
_partitionCompareKey = functools.cmp_to_key(partitionCompare)
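# Usage sketch (new_partitions is a hypothetical list of non-existent
# PartitionDevice requests):
#     new_partitions.sort(key=_partitionCompareKey)
# which places the most constrained requests (fixed start sector,
# primary-only, fixed size) ahead of growable, unconstrained ones.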
def getNextPartitionType(disk, no_primary=None):
""" Return the type of partition to create next on a disk.
Return a parted partition type value representing the type of the
next partition we will create on this disk.
If there is only one free primary partition and we can create an
extended partition, we do that.
If there are free primary slots and an extended partition we will
recommend creating a primary partition. This can be overridden
with the keyword argument no_primary.
:param disk: the disk from which a partition may be allocated
:type disk: :class:`parted.Disk`
:keyword no_primary: refuse to return :const:`parted.PARTITION_NORMAL`
:returns: the chosen partition type
:rtype: a parted PARTITION_* constant
"""
part_type = None
extended = disk.getExtendedPartition()
supports_extended = disk.supportsFeature(parted.DISK_TYPE_EXTENDED)
logical_count = len(disk.getLogicalPartitions())
max_logicals = disk.getMaxLogicalPartitions()
primary_count = disk.primaryPartitionCount
if primary_count < disk.maxPrimaryPartitionCount:
if primary_count == disk.maxPrimaryPartitionCount - 1:
# can we make an extended partition? now's our chance.
if not extended and supports_extended:
part_type = parted.PARTITION_EXTENDED
elif not extended:
# extended partitions not supported. primary or nothing.
if not no_primary:
part_type = parted.PARTITION_NORMAL
else:
# there is an extended and a free primary
if not no_primary:
part_type = parted.PARTITION_NORMAL
elif logical_count < max_logicals:
# we have an extended with logical slots, so use one.
part_type = parted.PARTITION_LOGICAL
else:
# there are two or more primary slots left. use one unless we're
# not supposed to make primaries.
if not no_primary:
part_type = parted.PARTITION_NORMAL
elif extended and logical_count < max_logicals:
part_type = parted.PARTITION_LOGICAL
elif extended and logical_count < max_logicals:
part_type = parted.PARTITION_LOGICAL
return part_type
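# Rough decision summary, assuming an msdos label with four primary slots:
# an extended partition is created only when the last free primary slot
# would otherwise be used; otherwise a normal partition is preferred
# (unless no_primary), falling back to a logical partition while the
# extended partition still has logical slots, else None.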
def getBestFreeSpaceRegion(disk, part_type, req_size, start=None,
boot=None, best_free=None, grow=None,
alignment=None):
""" Return the "best" free region on the specified disk.
For non-boot partitions, we return the largest free region on the
disk. For boot partitions, we return the first region that is
large enough to hold the partition.
Partition type (parted's PARTITION_NORMAL, PARTITION_LOGICAL) is
taken into account when locating a suitable free region.
For locating the best region from among several disks, the keyword
argument best_free allows the specification of a current "best"
free region with which to compare the best from this disk. The
overall best region is returned.
:param disk: the disk
:type disk: :class:`parted.Disk`
:param part_type: the type of partition we want to allocate
:type part_type: one of parted's PARTITION_* constants
:param req_size: the requested size of the partition in MiB
:type req_size: :class:`~.size.Size`
:keyword start: requested start sector for the partition
:type start: int
:keyword boot: whether this will be a bootable partition
:type boot: bool
:keyword best_free: current best free region for this partition
:type best_free: :class:`parted.Geometry`
:keyword grow: indicates whether this is a growable request
:type grow: bool
:keyword alignment: disk alignment requirements
:type alignment: :class:`parted.Alignment`
"""
log.debug("getBestFreeSpaceRegion: disk=%s part_type=%d req_size=%s "
"boot=%s best=%s grow=%s start=%s",
disk.device.path, part_type, req_size, boot, best_free, grow,
start)
extended = disk.getExtendedPartition()
alignment = alignment or parted.Alignment(offset=0, grainSize=1)
for free_geom in disk.getFreeSpaceRegions():
# align the start sector of the free region since we will be aligning
# the start sector of the partition
if start is not None and \
not alignment.isAligned(free_geom, free_geom.start):
log.debug("aligning start sector of region %d-%d", free_geom.start,
free_geom.end)
try:
aligned_start = alignment.alignUp(free_geom, free_geom.start)
except ArithmeticError:
aligned_start = None
else:
# parted tends to align down when it cannot align up
if aligned_start < free_geom.start:
aligned_start = None
if aligned_start is None:
log.debug("failed to align start sector -- skipping region")
continue
free_geom = parted.Geometry(device=free_geom.device,
start=aligned_start,
end=free_geom.end)
log.debug("checking %d-%d (%s)", free_geom.start, free_geom.end,
Size(free_geom.getLength(unit="B")))
if start is not None and not free_geom.containsSector(start):
log.debug("free region does not contain requested start sector")
continue
if extended:
in_extended = extended.geometry.contains(free_geom)
if ((in_extended and part_type == parted.PARTITION_NORMAL) or
(not in_extended and part_type == parted.PARTITION_LOGICAL)):
log.debug("free region not suitable for request")
continue
if free_geom.start > disk.maxPartitionStartSector:
log.debug("free range start sector beyond max for new partitions")
continue
if boot:
max_boot = Size("2 TiB")
free_start = Size(free_geom.start * disk.device.sectorSize)
req_end = free_start + req_size
if req_end > max_boot:
log.debug("free range position would place boot req above %s",
max_boot)
continue
log.debug("current free range is %d-%d (%s)", free_geom.start,
free_geom.end,
Size(free_geom.getLength(unit="B")))
free_size = Size(free_geom.getLength(unit="B"))
# For boot partitions, we want the first suitable region we find.
# For growable or extended partitions, we want the largest possible
# free region.
# For all others, we want the smallest suitable free region.
if grow or part_type == parted.PARTITION_EXTENDED:
op = gt
else:
op = lt
if req_size <= free_size:
if not best_free or op(free_geom.length, best_free.length):
best_free = free_geom
if boot:
# if this is a bootable partition we want to
# use the first freespace region large enough
# to satisfy the request
break
return best_free
def sectorsToSize(sectors, sectorSize):
""" Convert length in sectors to size.
:param sectors: sector count
:type sectors: int
:param sectorSize: sector size
:type sectorSize: :class:`~.size.Size`
:returns: the size
:rtype: :class:`~.size.Size`
"""
return Size(sectors * sectorSize)
def sizeToSectors(size, sectorSize):
""" Convert size to length in sectors.
:param size: size
:type size: :class:`~.size.Size`
:param sectorSize: sector size in bytes
:type sectorSize: :class:`~.size.Size`
:returns: sector count
:rtype: int
"""
return int(size // sectorSize)
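# Worked example with 512-byte sectors:
#     sizeToSectors(Size("1 MiB"), Size(512)) == 2048
#     sectorsToSize(2048, Size(512)) == Size("1 MiB")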
def removeNewPartitions(disks, remove, all_partitions):
""" Remove newly added partitions from disks.
Remove all non-existent partitions from the disks in blivet's model.
:param: disks: list of partitioned disks
:type disks: list of :class:`~.devices.StorageDevice`
:param remove: list of partitions to remove
:type remove: list of :class:`~.devices.PartitionDevice`
:param all_partitions: list of all partitions on the disks
:type all_partitions: list of :class:`~.devices.PartitionDevice`
:returns: None
:rtype: NoneType
"""
log.debug("removing all non-preexisting partitions %s from disk(s) %s",
["%s(id %d)" % (p.name, p.id) for p in remove],
[d.name for d in disks])
for part in remove:
if part.partedPartition and part.disk in disks:
if part.exists:
# we're only removing partitions that don't physically exist
continue
if part.isExtended:
# these get removed last
continue
part.disk.format.partedDisk.removePartition(part.partedPartition)
part.partedPartition = None
part.disk = None
for disk in disks:
# remove empty extended so it doesn't interfere
extended = disk.format.extendedPartition
if extended and not disk.format.logicalPartitions and \
(flags.installer_mode or
extended not in (p.partedPartition for p in all_partitions)):
log.debug("removing empty extended partition from %s", disk.name)
disk.format.partedDisk.removePartition(extended)
def addPartition(disklabel, free, part_type, size, start=None, end=None):
""" Add a new partition to a disk.
:param disklabel: the disklabel to add the partition to
:type disklabel: :class:`~.formats.DiskLabel`
:param free: the free region in which to place the new partition
:type free: :class:`parted.Geometry`
:param part_type: the partition type
:type part_type: a parted.PARTITION_* constant
:param size: size of the new partition
:type size: :class:`~.size.Size`
:keyword start: starting sector for the partition
:type start: int
:keyword end: ending sector for the partition
:type end: int
:raises: :class:`~.errors.PartitioningError`
:returns: the newly added partitions
:rtype: :class:`parted.Partition`
.. note::
The new partition will be aligned using the kernel-provided optimal
alignment unless a start sector is provided.
"""
sectorSize = Size(disklabel.partedDevice.sectorSize)
if start is not None:
if end is None:
end = start + sizeToSectors(size, sectorSize) - 1
else:
start = free.start
if not disklabel.alignment.isAligned(free, start):
start = disklabel.alignment.alignNearest(free, start)
if disklabel.labelType == "sun" and start == 0:
start = disklabel.alignment.alignUp(free, start)
if part_type == parted.PARTITION_LOGICAL:
# make room for logical partition's metadata
start += disklabel.alignment.grainSize
if start != free.start:
log.debug("adjusted start sector from %d to %d", free.start, start)
if part_type == parted.PARTITION_EXTENDED and not size:
end = free.end
length = end - start + 1
else:
length = sizeToSectors(size, sectorSize)
end = start + length - 1
if not disklabel.endAlignment.isAligned(free, end):
end = disklabel.endAlignment.alignUp(free, end)
log.debug("adjusted length from %d to %d", length, end - start + 1)
if start > end:
raise PartitioningError(_("unable to allocate aligned partition"))
new_geom = parted.Geometry(device=disklabel.partedDevice,
start=start,
end=end)
max_length = disklabel.partedDisk.maxPartitionLength
if max_length and new_geom.length > max_length:
raise PartitioningError(_("requested size exceeds maximum allowed"))
# create the partition and add it to the disk
partition = parted.Partition(disk=disklabel.partedDisk,
type=part_type,
geometry=new_geom)
constraint = parted.Constraint(exactGeom=new_geom)
disklabel.partedDisk.addPartition(partition=partition,
constraint=constraint)
return partition
def getFreeRegions(disks, align=False):
""" Return a list of free regions on the specified disks.
:param disks: list of disks
:type disks: list of :class:`~.devices.Disk`
:param align: align the region length to disk grainSize
:type align: bool
:returns: list of free regions
:rtype: list of :class:`parted.Geometry`
Only free regions guaranteed to contain at least one aligned sector for
both the start and end alignments in the
:class:`~.formats.disklabel.DiskLabel` are returned.
"""
free = []
for disk in disks:
for f in disk.format.partedDisk.getFreeSpaceRegions():
grain_size = disk.format.alignment.grainSize
if f.length >= grain_size:
if align:
aligned_length = f.length - (f.length % grain_size)
log.debug("length of free region aligned from %d to %d",
f.length, aligned_length)
f.length = aligned_length
free.append(f)
return free
def updateExtendedPartitions(storage, disks):
""" Reconcile extended partition changes with the DeviceTree.
:param storage: the Blivet instance
:type storage: :class:`~.Blivet`
:param disks: list of disks
:type disks: list of :class:`~.devices.StorageDevice`
:returns: :const:`None`
:rtype: NoneType
"""
# XXX hack -- if we created any extended partitions we need to add
# them to the tree now
for disk in disks:
extended = disk.format.extendedPartition
if not extended:
# remove any obsolete extended partitions
for part in storage.partitions:
if part.disk == disk and part.isExtended:
if part.exists:
storage.destroyDevice(part)
else:
storage.devicetree._removeDevice(part, modparent=False)
continue
extendedName = devicePathToName(extended.getDeviceNodeName())
device = storage.devicetree.getDeviceByName(extendedName)
if device:
if not device.exists:
# created by us, update partedPartition
device.partedPartition = extended
# remove any obsolete extended partitions
for part in storage.partitions:
if part.disk == disk and part.isExtended and \
part.partedPartition not in disk.format.partitions:
if part.exists:
storage.destroyDevice(part)
else:
storage.devicetree._removeDevice(part, modparent=False)
if device:
continue
# This is a little odd because normally instantiating a partition
# that does not exist means leaving self.parents empty and instead
# populating self.req_disks. In this case, we need to skip past
# that since this partition is already defined.
device = PartitionDevice(extendedName, parents=disk)
device.parents = [disk]
device.partedPartition = extended
# just add the device for now -- we'll handle actions at the last
# moment to simplify things
storage.devicetree._addDevice(device)
def doPartitioning(storage):
""" Allocate and grow partitions.
When this function returns without error, all PartitionDevice
instances must have their parents set to the disk they are
allocated on, and their partedPartition attribute set to the
appropriate parted.Partition instance from their containing
disk. All req_xxxx attributes must be unchanged.
:param storage: Blivet instance
:type storage: :class:`~.Blivet`
:raises: :class:`~.errors.PartitioningError`
:returns: :const:`None`
"""
disks = [d for d in storage.partitioned if not d.protected]
for disk in disks:
try:
disk.setup()
except DeviceError as e:
log.error("failed to set up disk %s: %s", disk.name, e)
raise PartitioningError(_("disk %s inaccessible") % disk.name)
# Remove any extended partition that does not have an action associated.
#
# XXX This does not remove the extended from the parted.Disk, but it should
# cause removeNewPartitions to remove it since there will no longer be
# a PartitionDevice for it.
for partition in storage.partitions:
if not partition.exists and partition.isExtended and \
not storage.devicetree.findActions(device=partition, action_type="create"):
storage.devicetree._removeDevice(partition, modparent=False, force=True)
partitions = storage.partitions[:]
for part in storage.partitions:
part.req_bootable = False
if not part.exists:
# start over with flexible-size requests
part.req_size = part.req_base_size
try:
storage.bootDevice.req_bootable = True
except AttributeError:
# there's no stage2 device. hopefully it's temporary.
pass
removeNewPartitions(disks, partitions, partitions)
free = getFreeRegions(disks)
try:
allocatePartitions(storage, disks, partitions, free)
growPartitions(disks, partitions, free, size_sets=storage.size_sets)
except Exception:
raise
else:
# Mark all growable requests as no longer growable.
for partition in storage.partitions:
log.debug("fixing size of %s", partition)
partition.req_grow = False
partition.req_base_size = partition.size
partition.req_size = partition.size
finally:
# these are only valid for one allocation run
storage.size_sets = []
# The number and thus the name of partitions may have changed now,
# allocatePartitions() takes care of this for new partitions, but not
# for pre-existing ones, so we update the name of all partitions here
for part in storage.partitions:
# leave extended partitions as-is -- we'll handle them separately
if part.isExtended:
continue
part.updateName()
updateExtendedPartitions(storage, disks)
for part in [p for p in storage.partitions if not p.exists]:
problem = part.checkSize()
if problem < 0:
raise PartitioningError(_("partition is too small for %(format)s formatting "
"(allowable size is %(minSize)s to %(maxSize)s)")
% {"format": part.format.name, "minSize": part.format.minSize,
"maxSize": part.format.maxSize})
elif problem > 0:
raise PartitioningError(_("partition is too large for %(format)s formatting "
"(allowable size is %(minSize)s to %(maxSize)s)")
% {"format": part.format.name, "minSize": part.format.minSize,
"maxSize": part.format.maxSize})
def align_size_for_disklabel(size, disklabel):
# Align the base size to the disk's grain size.
grain_size = Size(disklabel.alignment.grainSize)
grains, rem = divmod(size, grain_size)
return (grains * grain_size) + (grain_size if rem else Size(0))
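# Worked example: with a 1 MiB alignment grain, a 10.5 MiB request gives
# grains=10, rem=0.5 MiB and is rounded up to 11 MiB; an exactly-aligned
# 10 MiB request is returned unchanged.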
def allocatePartitions(storage, disks, partitions, freespace):
""" Allocate partitions based on requested features.
:param storage: a Blivet instance
:type storage: :class:`~.Blivet`
:param disks: list of usable disks
:type disks: list of :class:`~.devices.StorageDevice`
:param partitions: list of partitions
:type partitions: list of :class:`~.devices.PartitionDevice`
:param freespace: list of free regions on disks
:type freespace: list of :class:`parted.Geometry`
:raises: :class:`~.errors.PartitioningError`
:returns: :const:`None`
Non-existing partitions are sorted according to their requested
attributes, and then allocated.
The basic approach to sorting is that the more specifically-
defined a request is, the earlier it will be allocated. See
:func:`partitionCompare` for details of the sorting criteria.
The :class:`~.devices.PartitionDevice` instances will have their name
and parents attributes set once they have been allocated.
"""
log.debug("allocatePartitions: disks=%s ; partitions=%s",
[d.name for d in disks],
["%s(id %d)" % (p.name, p.id) for p in partitions])
new_partitions = [p for p in partitions if not p.exists]
new_partitions.sort(key=_partitionCompareKey)
# the following dicts all use device path strings as keys
disklabels = {} # DiskLabel instances for each disk
all_disks = {} # StorageDevice for each disk
for disk in disks:
if disk.path not in disklabels.keys():
disklabels[disk.path] = disk.format
all_disks[disk.path] = disk
removeNewPartitions(disks, new_partitions, partitions)
for _part in new_partitions:
if _part.partedPartition and _part.isExtended:
# ignore new extendeds as they are implicit requests
continue
# obtain the set of candidate disks
req_disks = []
if _part.req_disks:
# use the requested disk set
req_disks = _part.req_disks
else:
# no disks specified means any disk will do
req_disks = disks
# sort the disks, making sure the boot disk is first
req_disks.sort(key=storage.compareDisksKey)
for disk in req_disks:
if storage.bootDisk and disk == storage.bootDisk:
boot_index = req_disks.index(disk)
req_disks.insert(0, req_disks.pop(boot_index))
boot = _part.req_base_weight > 1000
log.debug("allocating partition: %s ; id: %d ; disks: %s ;\n"
"boot: %s ; primary: %s ; size: %s ; grow: %s ; "
"max_size: %s ; start: %s ; end: %s", _part.name, _part.id,
[d.name for d in req_disks],
boot, _part.req_primary,
_part.req_size, _part.req_grow,
_part.req_max_size, _part.req_start_sector,
_part.req_end_sector)
free = None
use_disk = None
part_type = None
growth = 0 # in sectors
# loop through disks
for _disk in req_disks:
disklabel = disklabels[_disk.path]
best = None
current_free = free
# for growable requests, we don't want to pass the current free
# geometry to getBestFreeRegion -- this allows us to try the
# best region from each disk and choose one based on the total
# growth it allows
if _part.req_grow:
current_free = None
log.debug("checking freespace on %s", _disk.name)
if _part.req_start_sector is None:
req_size = align_size_for_disklabel(_part.req_size, disklabel)
else:
# don't align size if start sector was specified
req_size = _part.req_size
if req_size != _part.req_size:
log.debug("size %s rounded up to %s for disk %s",
_part.req_size, req_size, _disk.name)
new_part_type = getNextPartitionType(disklabel.partedDisk)
if new_part_type is None:
# can't allocate any more partitions on this disk
log.debug("no free partition slots on %s", _disk.name)
continue
if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
if (disklabel.partedDisk.primaryPartitionCount <
disklabel.partedDisk.maxPrimaryPartitionCount):
# don't fail to create a primary if there are only three
# primary partitions on the disk (#505269)
new_part_type = parted.PARTITION_NORMAL
else:
# we need a primary slot and none are free on this disk
log.debug("no primary slots available on %s", _disk.name)
continue
elif _part.req_partType is not None and \
new_part_type != _part.req_partType:
new_part_type = _part.req_partType
best = getBestFreeSpaceRegion(disklabel.partedDisk,
new_part_type,
req_size,
start=_part.req_start_sector,
best_free=current_free,
boot=boot,
grow=_part.req_grow,
alignment=disklabel.alignment)
if best == free and not _part.req_primary and \
new_part_type == parted.PARTITION_NORMAL:
# see if we can do better with a logical partition
log.debug("not enough free space for primary -- trying logical")
new_part_type = getNextPartitionType(disklabel.partedDisk,
no_primary=True)
if new_part_type:
best = getBestFreeSpaceRegion(disklabel.partedDisk,
new_part_type,
req_size,
start=_part.req_start_sector,
best_free=current_free,
boot=boot,
grow=_part.req_grow,
alignment=disklabel.alignment)
if best and free != best:
update = True
allocated = new_partitions[:new_partitions.index(_part)+1]
if any([p.req_grow for p in allocated]):
log.debug("evaluating growth potential for new layout")
new_growth = 0
for disk_path in disklabels.keys():
log.debug("calculating growth for disk %s", disk_path)
# Now we check, for growable requests, which of the two
# free regions will allow for more growth.
# set up chunks representing the disks' layouts
temp_parts = []
for _p in new_partitions[:new_partitions.index(_part)]:
if _p.disk.path == disk_path:
temp_parts.append(_p)
# add the current request to the temp disk to set up
# its partedPartition attribute with a base geometry
if disk_path == _disk.path:
_part_type = new_part_type
_free = best
if new_part_type == parted.PARTITION_EXTENDED and \
new_part_type != _part.req_partType:
addPartition(disklabel, best, new_part_type,
None)
_part_type = parted.PARTITION_LOGICAL
_free = getBestFreeSpaceRegion(disklabel.partedDisk,
_part_type,
req_size,
start=_part.req_start_sector,
boot=boot,
grow=_part.req_grow,
alignment=disklabel.alignment)
if not _free:
log.info("not enough space after adding "
"extended partition for growth test")
if new_part_type == parted.PARTITION_EXTENDED:
e = disklabel.extendedPartition
disklabel.partedDisk.removePartition(e)
continue
temp_part = None
try:
temp_part = addPartition(disklabel,
_free,
_part_type,
req_size,
_part.req_start_sector,
_part.req_end_sector)
except ArithmeticError as e:
log.debug("failed to allocate aligned partition "
"for growth test")
continue
_part.partedPartition = temp_part
_part.disk = _disk
temp_parts.append(_part)
chunks = getDiskChunks(all_disks[disk_path],
temp_parts, freespace)
# grow all growable requests
disk_growth = 0 # in sectors
disk_sector_size = Size(disklabels[disk_path].partedDevice.sectorSize)
for chunk in chunks:
chunk.growRequests()
# record the growth for this layout
new_growth += chunk.growth
disk_growth += chunk.growth
for req in chunk.requests:
log.debug("request %d (%s) growth: %d (%s) "
"size: %s",
req.device.id,
req.device.name,
req.growth,
sectorsToSize(req.growth,
disk_sector_size),
sectorsToSize(req.growth + req.base,
disk_sector_size))
log.debug("disk %s growth: %d (%s)",
disk_path, disk_growth,
sectorsToSize(disk_growth,
disk_sector_size))
if temp_part:
disklabel.partedDisk.removePartition(temp_part)
_part.partedPartition = None
_part.disk = None
if new_part_type == parted.PARTITION_EXTENDED:
e = disklabel.extendedPartition
disklabel.partedDisk.removePartition(e)
log.debug("total growth: %d sectors", new_growth)
# update the chosen free region unless the previous
# choice yielded greater total growth
if free is not None and new_growth <= growth:
log.debug("keeping old free: %d <= %d", new_growth,
growth)
update = False
else:
growth = new_growth
if update:
# now we know we are choosing a new free space,
# so update the disk and part type
log.debug("updating use_disk to %s, type: %s",
_disk.name, new_part_type)
part_type = new_part_type
use_disk = _disk
log.debug("new free: %d-%d / %s", best.start,
best.end,
Size(best.getLength(unit="B")))
log.debug("new free allows for %d sectors of growth", growth)
free = best
if free and boot:
# if this is a bootable partition we want to
# use the first freespace region large enough
# to satisfy the request
log.debug("found free space for bootable request")
break
if free is None:
raise PartitioningError(_("Unable to allocate requested partition scheme."))
_disk = use_disk
disklabel = _disk.format
if _part.req_start_sector is None:
aligned_size = align_size_for_disklabel(_part.req_size, disklabel)
else:
# not aligned
aligned_size = _part.req_size
# create the extended partition if needed
if part_type == parted.PARTITION_EXTENDED and \
part_type != _part.req_partType:
log.debug("creating extended partition")
addPartition(disklabel, free, part_type, None)
# now the extended partition exists, so set type to logical
part_type = parted.PARTITION_LOGICAL
# recalculate freespace
log.debug("recalculating free space")
free = getBestFreeSpaceRegion(disklabel.partedDisk,
part_type,
aligned_size,
start=_part.req_start_sector,
boot=boot,
grow=_part.req_grow,
alignment=disklabel.alignment)
if not free:
raise PartitioningError(_("not enough free space after "
"creating extended partition"))
try:
partition = addPartition(disklabel, free, part_type, aligned_size,
_part.req_start_sector, _part.req_end_sector)
except ArithmeticError:
raise PartitioningError(_("failed to allocate aligned partition"))
log.debug("created partition %s of %s and added it to %s",
partition.getDeviceNodeName(),
Size(partition.getLength(unit="B")),
disklabel.device)
# this one sets the name
_part.partedPartition = partition
_part.disk = _disk
# parted modifies the partition in the process of adding it to
# the disk, so we need to grab the latest version...
_part.partedPartition = disklabel.partedDisk.getPartitionByPath(_part.path)
class Request(object):
""" A partition request.
Request instances are used for calculating how much to grow
partitions.
"""
def __init__(self, device):
"""
:param device: the device being requested
:type device: :class:`~.devices.StorageDevice`
"""
self.device = device
self.growth = 0 # growth in sectors
self.max_growth = 0 # max growth in sectors
self.done = not getattr(device, "req_grow", True) # can we grow this
# request more?
self.base = 0 # base sectors
@property
def reserveRequest(self):
""" Requested reserved fixed extra space for the request (in sectors) """
# generic requests don't need any such extra space
return 0
@property
def growable(self):
""" True if this request is growable. """
return getattr(self.device, "req_grow", True)
@property
def id(self):
""" The id of the Device instance this request corresponds to. """
return self.device.id
def __repr__(self):
s = ("%(type)s instance --\n"
"id = %(id)s name = %(name)s growable = %(growable)s\n"
"base = %(base)d growth = %(growth)d max_grow = %(max_grow)d\n"
"done = %(done)s" %
{"type": self.__class__.__name__, "id": self.id,
"name": self.device.name, "growable": self.growable,
"base": self.base, "growth": self.growth,
"max_grow": self.max_growth, "done": self.done})
return s
class PartitionRequest(Request):
def __init__(self, partition):
"""
:param partition: the partition being requested
:type partition: :class:`~.devices.PartitionDevice`
"""
super(PartitionRequest, self).__init__(partition)
self.base = partition.partedPartition.geometry.length # base sectors
sector_size = Size(partition.partedPartition.disk.device.sectorSize)
if partition.req_grow:
limits = [l for l in [sizeToSectors(partition.req_max_size, sector_size),
sizeToSectors(partition.format.maxSize, sector_size),
partition.partedPartition.disk.maxPartitionLength] if l > 0]
if limits:
max_sectors = min(limits)
self.max_growth = max_sectors - self.base
if self.max_growth <= 0:
# max size is less than or equal to base, so we're done
self.done = True
class LVRequest(Request):
def __init__(self, lv):
"""
:param lv: the logical volume being requested
:type lv: :class:`~.devices.LVMLogicalVolumeDevice`
"""
super(LVRequest, self).__init__(lv)
# Round up to nearest pe. For growable requests this will mean that
# first growth is to fill the remainder of any unused extent.
self.base = int(lv.vg.align(lv.req_size, roundup=True) // lv.vg.peSize)
if lv.req_grow:
limits = [int(l // lv.vg.peSize) for l in
(lv.vg.align(lv.req_max_size),
lv.vg.align(lv.format.maxSize)) if l > Size(0)]
if limits:
max_units = min(limits)
self.max_growth = max_units - self.base
if self.max_growth <= 0:
# max size is less than or equal to base, so we're done
self.done = True
@property
def reserveRequest(self):
reserve = super(LVRequest, self).reserveRequest
if self.device.cached:
total_cache_size = self.device.cache.size + self.device.cache.md_size
reserve += int(self.device.vg.align(total_cache_size, roundup=True) / self.device.vg.peSize)
return reserve
class Chunk(object):
""" A free region from which devices will be allocated """
def __init__(self, length, requests=None):
"""
:param length: the length of the chunk (units vary with subclass)
:type length: int
:keyword requests: list of requests to add
:type requests: list of :class:`Request`
"""
if not hasattr(self, "path"):
self.path = None
self.length = length
self.pool = length # free unit count
self.base = 0 # sum of growable requests' base
# sizes
self.requests = [] # list of Request instances
if isinstance(requests, list):
for req in requests:
self.addRequest(req)
self.skip_list = []
def __repr__(self):
s = ("%(type)s instance --\n"
"device = %(device)s length = %(length)d size = %(size)s\n"
"remaining = %(rem)d pool = %(pool)d" %
{"type": self.__class__.__name__, "device": self.path,
"length": self.length, "size": self.lengthToSize(self.length),
"pool": self.pool, "rem": self.remaining})
return s
# Force str and unicode types in case path is unicode
def _toString(self):
s = "%d on %s" % (self.length, self.path)
return s
def __str__(self):
return stringize(self._toString())
def __unicode__(self):
return unicodeize(self._toString())
def addRequest(self, req):
""" Add a request to this chunk.
:param req: the request to add
:type req: :class:`Request`
"""
log.debug("adding request %d to chunk %s", req.device.id, self)
self.requests.append(req)
self.pool -= req.base
self.pool -= req.reserveRequest
if not req.done:
self.base += req.base
def reclaim(self, request, amount):
""" Reclaim units from a request and return them to the pool.
:param request: the request to reclaim units from
:type request: :class:`Request`
:param amount: number of units to reclaim from the request
:type amount: int
:raises: ValueError
:returns: None
"""
log.debug("reclaim: %s %d (%s)", request, amount, self.lengthToSize(amount))
if request.growth < amount:
log.error("tried to reclaim %d from request with %d of growth",
amount, request.growth)
raise ValueError(_("cannot reclaim more than request has grown"))
request.growth -= amount
self.pool += amount
# put this request in the skip list so we don't try to grow it the
# next time we call growRequests to allocate the newly re-acquired pool
if request not in self.skip_list:
self.skip_list.append(request)
@property
def growth(self):
""" Sum of growth for all requests in this chunk. """
return sum(r.growth for r in self.requests)
@property
def hasGrowable(self):
""" True if this chunk contains at least one growable request. """
for req in self.requests:
if req.growable:
return True
return False
@property
def remaining(self):
""" Number of requests still being grown in this chunk. """
return len([d for d in self.requests if not d.done])
@property
def done(self):
""" True if we are finished growing all requests in this chunk. """
return self.remaining == 0 or self.pool == 0
def maxGrowth(self, req):
return req.max_growth
def lengthToSize(self, length):
return length
def sizeToLength(self, size):
return size
def trimOverGrownRequest(self, req, base=None):
""" Enforce max growth and return extra units to the pool.
:param req: the request to trim
:type req: :class:`Request`
:keyword base: base unit count to adjust if req is done growing
:type base: int
:returns: the new base or None if no base was given
:rtype: int or None
"""
max_growth = self.maxGrowth(req)
if max_growth and req.growth >= max_growth:
if req.growth > max_growth:
# we've grown beyond the maximum. put some back.
extra = req.growth - max_growth
log.debug("taking back %d (%s) from %d (%s)",
extra, self.lengthToSize(extra),
req.device.id, req.device.name)
self.pool += extra
req.growth = max_growth
# We're done growing this request, so it no longer
# factors into the growable base used to determine
# what fraction of the pool each request gets.
if base is not None:
base -= req.base
req.done = True
return base
def sortRequests(self):
pass
def growRequests(self, uniform=False):
""" Calculate growth amounts for requests in this chunk.
:keyword uniform: grow requests uniformly instead of proportionally
:type uniform: bool
The default mode of growth is as follows: given a total number of
available units, requests receive an allotment proportional to their
base sizes. That means a request with base size 1000 will grow four
times as fast as a request with base size 250.
Under uniform growth, all requests receive an equal portion of the
free units.
"""
log.debug("Chunk.growRequests: %r", self)
self.sortRequests()
for req in self.requests:
log.debug("req: %r", req)
# we use this to hold the base for the next loop through the
# chunk's requests since we want the base to be the same for
# all requests in any given growth iteration
new_base = self.base
last_pool = 0 # used to track changes to the pool across iterations
while not self.done and self.pool and last_pool != self.pool:
last_pool = self.pool # to keep from getting stuck
self.base = new_base
if uniform:
growth = int(last_pool / self.remaining)
log.debug("%d requests and %s (%s) left in chunk",
self.remaining, self.pool, self.lengthToSize(self.pool))
for p in self.requests:
if p.done or p in self.skip_list:
continue
if not uniform:
# Each request is allocated free units from the pool
# based on the relative _base_ sizes of the remaining
# growable requests.
share = Decimal(p.base) / Decimal(self.base)
growth = int(share * last_pool) # truncate, don't round
p.growth += growth
self.pool -= growth
log.debug("adding %s (%s) to %d (%s)",
growth, self.lengthToSize(growth),
p.device.id, p.device.name)
new_base = self.trimOverGrownRequest(p, base=new_base)
log.debug("new grow amount for request %d (%s) is %s "
"units, or %s",
p.device.id, p.device.name, p.growth,
self.lengthToSize(p.growth))
if self.pool:
# allocate any leftovers in pool to the first partition
# that can still grow
for p in self.requests:
if p.done or p in self.skip_list:
continue
growth = self.pool
p.growth += growth
self.pool = 0
log.debug("adding %s (%s) to %d (%s)",
growth, self.lengthToSize(growth),
p.device.id, p.device.name)
self.trimOverGrownRequest(p)
log.debug("new grow amount for request %d (%s) is %s "
"units, or %s",
p.device.id, p.device.name, p.growth,
self.lengthToSize(p.growth))
if self.pool == 0:
break
# requests that were skipped over this time through are back on the
# table next time
self.skip_list = []
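# --- Added illustration (not part of the original module) ---
# A minimal sketch of the proportional mode described in Chunk.growRequests:
# each growable request receives int(base / total_base * pool) units per
# pass. The request bases and pool size below are hypothetical.
def _demo_proportional_growth():
    from decimal import Decimal
    pool = 500            # free units available in the chunk
    bases = [1000, 250]   # base sizes of two growable requests
    total_base = sum(bases)
    # same truncating arithmetic as growRequests (int(), not round())
    return [int(Decimal(base) / Decimal(total_base) * pool) for base in bases]
    # -> [400, 100]: the larger request grows four times as fast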
class DiskChunk(Chunk):
""" A free region on disk from which partitions will be allocated """
def __init__(self, geometry, requests=None):
"""
:param geometry: the free region this chunk represents
:type geometry: :class:`parted.Geometry`
:keyword requests: list of requests to add initially
:type requests: list of :class:`PartitionRequest`
.. note::
We will limit partition growth based on disklabel limitations
for partition end sector, so a 10TB disk with an msdos disklabel
will be treated like a 2TiB disk.
.. note::
If you plan to allocate aligned partitions you should pass in an
aligned geometry instance.
"""
self.geometry = geometry # parted.Geometry
self.sectorSize = Size(self.geometry.device.sectorSize)
self.path = self.geometry.device.path
super(DiskChunk, self).__init__(self.geometry.length, requests=requests)
def __repr__(self):
s = super(DiskChunk, self).__str__()
s += (" start = %(start)d end = %(end)d\n"
"sectorSize = %(sectorSize)s\n" %
{"start": self.geometry.start, "end": self.geometry.end,
"sectorSize": self.sectorSize})
return s
# Force str and unicode types in case path is unicode
def _toString(self):
s = "%d (%d-%d) on %s" % (self.length, self.geometry.start,
self.geometry.end, self.path)
return s
def __str__(self):
return stringize(self._toString())
def __unicode__(self):
return unicodeize(self._toString())
def addRequest(self, req):
""" Add a request to this chunk.
:param req: the request to add
:type req: :class:`PartitionRequest`
"""
if not isinstance(req, PartitionRequest):
raise ValueError(_("DiskChunk requests must be of type "
"PartitionRequest"))
if not self.requests:
# when adding the first request to the chunk, adjust the pool
# size to reflect any disklabel-specific limits on end sector
max_sector = req.device.partedPartition.disk.maxPartitionStartSector
chunk_end = min(max_sector, self.geometry.end)
if chunk_end <= self.geometry.start:
# this should clearly never be possible, but if the chunk's
# start sector is beyond the maximum allowed end sector, we
# cannot continue
log.error("chunk start sector is beyond disklabel maximum")
raise PartitioningError(_("partitions allocated outside "
"disklabel limits"))
new_pool = chunk_end - self.geometry.start + 1
if new_pool != self.pool:
log.debug("adjusting pool to %d based on disklabel limits", new_pool)
self.pool = new_pool
super(DiskChunk, self).addRequest(req)
def maxGrowth(self, req):
""" Return the maximum possible growth for a request.
:param req: the request
:type req: :class:`PartitionRequest`
"""
req_end = req.device.partedPartition.geometry.end
req_start = req.device.partedPartition.geometry.start
# Establish the current total number of sectors of growth for requests
# that lie before this one within this chunk. We add the total count
# to this request's end sector to obtain the end sector for this
# request, including growth of earlier requests but not including
# growth of this request. Maximum growth values are obtained using
# this end sector and various values for maximum end sector.
growth = 0
for request in self.requests:
if request.device.partedPartition.geometry.start < req_start:
growth += request.growth
req_end += growth
# obtain the set of possible maximum sectors-of-growth values for this
# request and use the smallest
limits = []
# disklabel-specific maximum sector
max_sector = req.device.partedPartition.disk.maxPartitionStartSector
limits.append(max_sector - req_end)
# 2TB limit on bootable partitions, regardless of disklabel
if req.device.req_bootable:
max_boot = sizeToSectors(Size("2 TiB"), self.sectorSize)
limits.append(max_boot - req_end)
# request-specific maximum (see Request.__init__, above, for details)
if req.max_growth:
limits.append(req.max_growth)
max_growth = min(limits)
return max_growth
def lengthToSize(self, length):
return sectorsToSize(length, self.sectorSize)
def sizeToLength(self, size):
return sizeToSectors(size, self.sectorSize)
def sortRequests(self):
# sort the partitions by start sector
self.requests.sort(key=lambda r: r.device.partedPartition.geometry.start)
class VGChunk(Chunk):
""" A free region in an LVM VG from which LVs will be allocated """
def __init__(self, vg, requests=None):
"""
:param vg: the volume group whose free space this chunk represents
:type vg: :class:`~.devices.LVMVolumeGroupDevice`
:keyword requests: list of requests to add initially
:type requests: list of :class:`LVRequest`
"""
self.vg = vg
self.path = vg.path
usable_extents = vg.extents - int(vg.align(vg.reservedSpace, roundup=True) / vg.peSize)
super(VGChunk, self).__init__(usable_extents, requests=requests)
def addRequest(self, req):
""" Add a request to this chunk.
:param req: the request to add
:type req: :class:`LVRequest`
"""
if not isinstance(req, LVRequest):
raise ValueError(_("VGChunk requests must be of type "
"LVRequest"))
super(VGChunk, self).addRequest(req)
def lengthToSize(self, length):
return self.vg.peSize * length
def sizeToLength(self, size):
return int(size / self.vg.peSize)
def sortRequests(self):
# sort the partitions by start sector
self.requests.sort(key=_lvCompareKey)
class ThinPoolChunk(VGChunk):
""" A free region in an LVM thin pool from which LVs will be allocated """
def __init__(self, pool, requests=None):
"""
:param pool: the thin pool whose free space this chunk represents
:type pool: :class:`~.devices.LVMThinPoolDevice`
:keyword requests: list of requests to add initially
:type requests: list of :class:`LVRequest`
"""
self.vg = pool.vg # only used for align, &c
self.path = pool.path
usable_extents = (pool.size / pool.vg.peSize)
super(VGChunk, self).__init__(usable_extents, requests=requests) # pylint: disable=bad-super-call
def getDiskChunks(disk, partitions, free):
""" Return a list of Chunk instances representing a disk.
:param disk: the disk
:type disk: :class:`~.devices.StorageDevice`
:param partitions: list of partitions
:type partitions: list of :class:`~.devices.PartitionDevice`
:param free: list of free regions
:type free: list of :class:`parted.Geometry`
:returns: list of chunks representing the disk
:rtype: list of :class:`DiskChunk`
Partitions and free regions not on the specified disk are ignored.
Chunks contain an aligned version of the free region's geometry.
"""
# list of all new partitions on this disk
disk_parts = [p for p in partitions if p.disk == disk and not p.exists]
disk_free = [f for f in free if f.device.path == disk.path]
chunks = []
for f in disk_free[:]:
# Align the geometry so we have a realistic view of the free space.
# alignUp and alignDown can align in the reverse direction if the only
# aligned sector within the geometry is in that direction, so we have to
# also check that the resulting aligned geometry has a non-zero length.
# (It is possible that both will align to the same sector in a small
# enough region.)
al_start = disk.format.alignment.alignUp(f, f.start)
al_end = disk.format.endAlignment.alignDown(f, f.end)
if al_start >= al_end:
disk_free.remove(f)
continue
geom = parted.Geometry(device=f.device,
start=al_start,
end=al_end)
if geom.length < disk.format.alignment.grainSize:
disk_free.remove(f)
continue
chunks.append(DiskChunk(geom))
for p in disk_parts:
if p.isExtended:
# handle extended partitions specially since they are
# indeed very special
continue
for i, f in enumerate(disk_free):
if f.contains(p.partedPartition.geometry):
chunks[i].addRequest(PartitionRequest(p))
break
return chunks
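# Added note (hypothetical numbers): with 1 MiB alignment on 512-byte
# sectors (grainSize 2048), a free region spanning sectors 63-4095 aligns
# up to 2048 and down to 2047, so al_start >= al_end and the region is
# skipped. This is why both the align pass and the grainSize length check
# above are needed before a DiskChunk is built.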
class TotalSizeSet(object):
""" Set of device requests with a target combined size.
This will be handled by growing the requests until the desired combined
size has been achieved.
"""
def __init__(self, devices, size):
"""
:param devices: the set of devices
:type devices: list of :class:`~.devices.PartitionDevice`
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
self.devices = []
for device in devices:
if isinstance(device, LUKSDevice):
partition = device.slave
else:
partition = device
self.devices.append(partition)
self.size = size
self.requests = []
self.allocated = sum((d.req_base_size for d in self.devices), Size(0))
log.debug("set.allocated = %d", self.allocated)
def allocate(self, amount):
log.debug("allocating %d to TotalSizeSet with %d/%d (%d needed)",
amount, self.allocated, self.size, self.needed)
self.allocated += amount
@property
def needed(self):
return self.size - self.allocated
def deallocate(self, amount):
log.debug("deallocating %d from TotalSizeSet with %d/%d (%d needed)",
amount, self.allocated, self.size, self.needed)
self.allocated -= amount
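# Added note: a TotalSizeSet starts with .allocated equal to the sum of its
# members' base sizes; growth passes then call allocate()/deallocate() until
# .needed (size - allocated) is driven toward zero by manageSizeSets below.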
class SameSizeSet(object):
""" Set of device requests with a common target size. """
def __init__(self, devices, size, grow=False, max_size=None):
"""
:param devices: the set of devices
:type devices: list of :class:`~.devices.PartitionDevice`
:param size: target size for each device/request
:type size: :class:`~.size.Size`
:keyword grow: whether the devices can be grown
:type grow: bool
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
self.devices = []
for device in devices:
if isinstance(device, LUKSDevice):
partition = device.slave
else:
partition = device
self.devices.append(partition)
self.size = int(size / len(devices))
self.grow = grow
self.max_size = max_size
self.requests = []
def manageSizeSets(size_sets, chunks):
growth_by_request = {}
requests_by_device = {}
chunks_by_request = {}
for chunk in chunks:
for request in chunk.requests:
requests_by_device[request.device] = request
chunks_by_request[request] = chunk
growth_by_request[request] = 0
for i in range(2):
reclaimed = dict([(chunk, 0) for chunk in chunks])
for ss in size_sets:
if isinstance(ss, TotalSizeSet):
# TotalSizeSet members are trimmed to achieve the requested
# total size
log.debug("set: %s %d/%d", [d.name for d in ss.devices],
ss.allocated, ss.size)
for device in ss.devices:
request = requests_by_device[device]
chunk = chunks_by_request[request]
new_growth = request.growth - growth_by_request[request]
ss.allocate(chunk.lengthToSize(new_growth))
# decide how much to take back from each request
# We may assume that all requests have the same base size.
# We're shooting for a roughly equal distribution by trimming
# growth from the requests that have grown the most first.
requests = sorted([requests_by_device[d] for d in ss.devices],
key=lambda r: r.growth, reverse=True)
needed = ss.needed
for request in requests:
chunk = chunks_by_request[request]
log.debug("%s", request)
log.debug("needed: %d", ss.needed)
if ss.needed < 0:
# it would be good to take back some from each device
# instead of taking all from the last one(s)
extra = -chunk.sizeToLength(needed) // len(ss.devices)
if extra > request.growth and i == 0:
log.debug("not reclaiming from this request")
continue
else:
extra = min(extra, request.growth)
reclaimed[chunk] += extra
chunk.reclaim(request, extra)
ss.deallocate(chunk.lengthToSize(extra))
if ss.needed <= 0:
request.done = True
elif isinstance(ss, SameSizeSet):
# SameSizeSet members all have the same size as the smallest
# member
requests = [requests_by_device[d] for d in ss.devices]
_min_growth = min([r.growth for r in requests])
log.debug("set: %s %d", [d.name for d in ss.devices], ss.size)
log.debug("min growth is %d", _min_growth)
for request in requests:
chunk = chunks_by_request[request]
_max_growth = chunk.sizeToLength(ss.size) - request.base
log.debug("max growth for %s is %d", request, _max_growth)
min_growth = max(min(_min_growth, _max_growth), 0)
if request.growth > min_growth:
extra = request.growth - min_growth
reclaimed[chunk] += extra
chunk.reclaim(request, extra)
request.done = True
elif request.growth == min_growth:
request.done = True
# store previous growth amounts so we know how much was allocated in
# the latest growRequests call
for request in growth_by_request.keys():
growth_by_request[request] = request.growth
for chunk in chunks:
if reclaimed[chunk] and not chunk.done:
chunk.growRequests()
def growPartitions(disks, partitions, free, size_sets=None):
""" Grow all growable partition requests.
Partitions have already been allocated from chunks of free space on
the disks. This function does not modify the ordering of partitions
or the free chunks from which they are allocated.
Free space within a given chunk is allocated to each growable
partition allocated from that chunk in an amount corresponding to
the ratio of that partition's base size to the sum of the base sizes
of all growable partitions allocated from the chunk.
:param disks: all usable disks
:type disks: list of :class:`~.devices.StorageDevice`
:param partitions: all partitions
:type partitions: list of :class:`~.devices.PartitionDevice`
:param free: all free regions on disks
:type free: list of :class:`parted.Geometry`
:keyword size_sets: list of size-related partition sets
:type size_sets: list of :class:`TotalSizeSet` or :class:`SameSizeSet`
:returns: :const:`None`
"""
log.debug("growPartitions: disks=%s, partitions=%s",
[d.name for d in disks],
["%s(id %d)" % (p.name, p.id) for p in partitions])
all_growable = [p for p in partitions if p.req_grow]
if not all_growable:
log.debug("no growable partitions")
return
if size_sets is None:
size_sets = []
log.debug("growable partitions are %s", [p.name for p in all_growable])
#
# collect info about each disk and the requests it contains
#
chunks = []
for disk in disks:
# list of free space regions on this disk prior to partition allocation
disk_free = [f for f in free if f.device.path == disk.path]
if not disk_free:
log.debug("no free space on %s", disk.name)
continue
disk_chunks = getDiskChunks(disk, partitions, disk_free)
log.debug("disk %s has %d chunks", disk.name, len(disk_chunks))
chunks.extend(disk_chunks)
#
# grow the partitions in each chunk as a group
#
for chunk in chunks:
if not chunk.hasGrowable:
# no growable partitions in this chunk
continue
chunk.growRequests()
# adjust set members' growth amounts as needed
manageSizeSets(size_sets, chunks)
for disk in disks:
log.debug("growing partitions on %s", disk.name)
for chunk in chunks:
if chunk.path != disk.path:
continue
if not chunk.hasGrowable:
# no growable partitions in this chunk
continue
# recalculate partition geometries
disklabel = disk.format
start = chunk.geometry.start
# find any extended partition on this disk
extended_geometry = getattr(disklabel.extendedPartition,
"geometry",
None) # parted.Geometry
# align start sector as needed
if not disklabel.alignment.isAligned(chunk.geometry, start):
start = disklabel.alignment.alignUp(chunk.geometry, start)
new_partitions = []
for p in chunk.requests:
ptype = p.device.partedPartition.type
log.debug("partition %s (%d): %s", p.device.name,
p.device.id, ptype)
if ptype == parted.PARTITION_EXTENDED:
continue
# XXX since we need one metadata sector before each
# logical partition we burn one logical block to
# safely align the start of each logical partition
if ptype == parted.PARTITION_LOGICAL:
start += disklabel.alignment.grainSize
new_length = p.base + p.growth
end = start + new_length - 1
# align end sector as needed
if not disklabel.endAlignment.isAligned(chunk.geometry, end):
end = disklabel.endAlignment.alignDown(chunk.geometry, end)
new_geometry = parted.Geometry(device=disklabel.partedDevice,
start=start,
end=end)
log.debug("new geometry for %s: %s", p.device.name,
new_geometry)
start = end + 1
new_partition = parted.Partition(disk=disklabel.partedDisk,
type=ptype,
geometry=new_geometry)
new_partitions.append((new_partition, p.device))
# remove all new partitions from this chunk
removeNewPartitions([disk], [r.device for r in chunk.requests],
partitions)
log.debug("back from removeNewPartitions")
# adjust the extended partition as needed
# we will only resize an extended partition that we created
log.debug("extended: %s", extended_geometry)
if extended_geometry and \
chunk.geometry.contains(extended_geometry):
log.debug("setting up new geometry for extended on %s", disk.name)
ext_start = 0
for (partition, device) in new_partitions:
if partition.type != parted.PARTITION_LOGICAL:
continue
if not ext_start or partition.geometry.start < ext_start:
# account for the logical block difference in start
# sector for the extended -v- first logical
# (partition.geometry.start is already aligned)
ext_start = partition.geometry.start - disklabel.alignment.grainSize
new_geometry = parted.Geometry(device=disklabel.partedDevice,
start=ext_start,
end=chunk.geometry.end)
log.debug("new geometry for extended: %s", new_geometry)
new_extended = parted.Partition(disk=disklabel.partedDisk,
type=parted.PARTITION_EXTENDED,
geometry=new_geometry)
ptypes = [p.type for (p, d) in new_partitions]
for pt_idx, ptype in enumerate(ptypes):
if ptype == parted.PARTITION_LOGICAL:
new_partitions.insert(pt_idx, (new_extended, None))
break
# add the partitions with their new geometries to the disk
for (partition, device) in new_partitions:
if device:
name = device.name
else:
# If there was no extended partition on this disk when
# doPartitioning was called we won't have a
# PartitionDevice instance for it.
name = partition.getDeviceNodeName()
log.debug("setting %s new geometry: %s", name,
partition.geometry)
constraint = parted.Constraint(exactGeom=partition.geometry)
disklabel.partedDisk.addPartition(partition=partition,
constraint=constraint)
path = partition.path
if device:
# set the device's name
device.partedPartition = partition
# without this, the path attr will be a basename. eek.
device.disk = disk
# make sure we store the disk's version of the partition
newpart = disklabel.partedDisk.getPartitionByPath(path)
device.partedPartition = newpart
def lvCompare(lv1, lv2):
""" More specifically defined lvs come first.
< 0 => x < y
0 => x == y
> 0 => x > y
"""
if not isinstance(lv1, Device):
lv1 = lv1.device
if not isinstance(lv2, Device):
lv2 = lv2.device
ret = 0
# larger requests go to the front of the list
ret -= compare(lv1.size, lv2.size) * 100
# fixed size requests to the front
ret += compare(lv1.req_grow, lv2.req_grow) * 50
# potentially larger growable requests go to the front
if lv1.req_grow and lv2.req_grow:
if not lv1.req_max_size and lv2.req_max_size:
ret -= 25
elif lv1.req_max_size and not lv2.req_max_size:
ret += 25
else:
ret -= compare(lv1.req_max_size, lv2.req_max_size) * 25
if ret > 0:
ret = 1
elif ret < 0:
ret = -1
return ret
_lvCompareKey = functools.cmp_to_key(lvCompare)
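# Added note: with the weights above, size dominates (x100), then fixed
# requests beat growable ones (x50), then the growth ceiling decides (x25).
# For example, a 10 GiB fixed LV sorts before a 10 GiB growable LV, and a
# growable LV with no req_max_size sorts before an equally sized one that
# has a ceiling.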
def _apply_chunk_growth(chunk):
""" grow the lvs by the amounts the VGChunk calculated """
for req in chunk.requests:
if not req.device.req_grow:
continue
size = chunk.lengthToSize(req.base + req.growth)
# reduce the size of thin pools by the pad size
if hasattr(req.device, "lvs"):
size -= Size(blockdev.lvm.get_thpool_padding(size, req.device.vg.peSize, included=True))
# Base is pe, which means potentially rounded up by as much as
# pesize-1. As a result, you can't just add the growth to the
# initial size.
req.device.size = size
def growLVM(storage):
""" Grow LVs according to the sizes of the PVs.
Strategy for growth involving thin pools:
- Applies to device factory class as well.
- Overcommit is not allowed.
- Pool lv's base size includes sizes of thin lvs within it.
- Pool is grown along with other non-thin lvs.
- Thin lvs within each pool are grown separately using the
ThinPoolChunk class.
"""
for vg in storage.vgs:
total_free = vg.freeSpace
if total_free < 0:
# by now we have allocated the PVs so if there isn't enough
# space in the VG we have a real problem
raise PartitioningError(_("not enough space for LVM requests"))
elif not total_free:
log.debug("vg %s has no free space", vg.name)
continue
log.debug("vg %s: %s free ; lvs: %s", vg.name, total_free,
[l.lvname for l in vg.lvs])
# don't include thin lvs in the vg's growth calculation
fatlvs = [lv for lv in vg.lvs if lv not in vg.thinlvs]
requests = []
for lv in fatlvs:
if lv in vg.thinpools:
# make sure the pool's base size is at least the sum of its lvs'
lv.req_size = max(lv.req_size, lv.usedSpace)
# add the required padding to the requested pool size
lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, vg.peSize))
# establish sizes for the percentage-based requests (which are fixed)
percentage_based_lvs = [lv for lv in vg.lvs if lv.req_percent]
if sum(lv.req_percent for lv in percentage_based_lvs) > 100:
raise ValueError("sum of percentages within a vg cannot exceed 100")
percent_base = sum(vg.align(lv.req_size, roundup=False) / vg.peSize
for lv in percentage_based_lvs)
percentage_basis = vg.freeExtents + percent_base
for lv in percentage_based_lvs:
new_extents = int(lv.req_percent * Decimal('0.01') * percentage_basis)
# set req_size also so the request can also be growable if desired
lv.size = lv.req_size = vg.peSize * new_extents
# grow regular lvs
chunk = VGChunk(vg, requests=[LVRequest(l) for l in fatlvs])
chunk.growRequests()
_apply_chunk_growth(chunk)
# now, grow thin lv requests within their respective pools
for pool in vg.thinpools:
requests = [LVRequest(l) for l in pool.lvs]
thin_chunk = ThinPoolChunk(pool, requests)
thin_chunk.growRequests()
_apply_chunk_growth(thin_chunk)
<|file_name|>directinput.py<|end_file_name|><|fim▁begin|># direct inputs
# source to this solution and code:
# http://stackoverflow.com/questions/14489013/simulate-python-keypresses-for-controlling-a-game
# http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
import ctypes
import time
HELD = set()
SendInput = ctypes.windll.user32.SendInput
mouse_button_down_mapping = {
'left': 0x0002,
'middle': 0x0020,
'right': 0x0008
}
mouse_button_up_mapping = {
'left': 0x0004,
'middle': 0x0040,
'right': 0x0010
}
CODES = {
'esc': 0x01,
'escape': 0x01,
'1': 0x02,
'2': 0x03,
'3': 0x04,
'4': 0x05,
'5': 0x06,
'6': 0x07,
'7': 0x08,
'8': 0x09,
'9': 0x0A, # DIK_9 is 0x0A; 0x10 would collide with 'q' (DIK_Q)
'0': 0x0B,
'q': 0x10,
'w': 0x11,
'e': 0x12,
'r': 0x13,
't': 0x14,
'y': 0x15,
'u': 0x16,
'i': 0x17,
'o': 0x18,
'p': 0x19,
'a': 0x1E,
's': 0x1F,
'd': 0x20,
'f': 0x21,
'g': 0x22,
'h': 0x23,
'j': 0x24,
'k': 0x25,
'l': 0x26,
'z': 0x2C,
'x': 0x2D,
'c': 0x2E,
'v': 0x2F,
'b': 0x30,
'n': 0x31,
'm': 0x32,
'ctrl': 0x1D,
'pageup': 0xC9 + 1024,
'pagedown': 0xD1 + 1024,
'up': 0xC8,
'left': 0xCB,
'right': 0xCD,
'down': 0xD0,
'alt': 0x38,
}
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actuals Functions
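# Added note: the first member of Input selects the union branch handed to
# SendInput -- 0 for INPUT_MOUSE (MouseInput) and 1 for INPUT_KEYBOARD
# (KeyBdInput). The dwFlags values used below are the Win32 constants
# KEYEVENTF_SCANCODE (0x0008) and KEYEVENTF_KEYUP (0x0002), which is why
# hold() sends 0x0008 and release() sends 0x0008 | 0x0002.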
def release_all():
held = list(HELD)
for key in held:
release(key)
try:
HELD.remove(key)
except KeyError:
pass
def hold(key):
hexKeyCode = CODES[str(key)]
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
HELD.add(key)
def release(key):
hexKeyCode = CODES[str(key)]
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
HELD.discard(key) # discard() so releasing a key that was never held cannot raise KeyError
def send(keys):
delay = .1
for key in keys:
hold(key)
time.sleep(delay)
release(key)
# for code in keycodes:
# time.sleep(delay)
def click_down(button='left'):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.mi = MouseInput(0, 0, 0, mouse_button_down_mapping[button], 0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(0), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def click_up(button='left'):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.mi = MouseInput(0, 0, 0, mouse_button_up_mapping[button], 0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(0), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def click(button='left', duration=0.05):
click_down(button=button)
time.sleep(duration)
click_up(button=button)
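# Usage sketch (added; the key and timings are arbitrary examples):
# hold('w'); time.sleep(1.0); release('w') # press W for one second
# click(); click() # double left click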
if __name__ == '__main__':
time.sleep(10)
click()
# send(['w'])
# for i in range(100):
# send('wasd')
# hold(CODES['w'])
# time.sleep(5)
# release(CODES['w'])
# time.sleep(5)
# hold(ONE)
# release(ONE)
# time.sleep(1)
# hold(TWO)
# time.sleep(1)
# release(TWO)
# time.sleep(1)
<|file_name|>interpolate-filter_test.js<|end_file_name|><|fim▁begin|>'use strict';
describe('GodHasAPlan.version module', function() {
beforeEach(module('GodHasAPlan.version'));
describe('interpolate filter', function() {
beforeEach(module(function($provide) {
$provide.value('version', 'TEST_VER');
}));
it('should replace VERSION', inject(function(interpolateFilter) {
expect(interpolateFilter('before %VERSION% after')).toEqual('before TEST_VER after');
}));
});
});
<|file_name|>ios-simulator-discovery.ts<|end_file_name|><|fim▁begin|>import { DeviceDiscovery } from "./device-discovery";
import { IOSSimulator } from "./../ios/simulator/ios-simulator-device";
import { EmulatorDiscoveryNames } from "../../constants";
import { IDictionary, IHostInfo } from "../../declarations";
import { IInjector } from "../../definitions/yok";
import { injector } from "../../yok";
import * as _ from "lodash";
export class IOSSimulatorDiscovery extends DeviceDiscovery {
private cachedSimulators: Mobile.IiSimDevice[] = [];
private availableSimulators: IDictionary<Mobile.IDeviceInfo> = {};
constructor(
private $injector: IInjector,
private $iOSSimResolver: Mobile.IiOSSimResolver,
private $mobileHelper: Mobile.IMobileHelper,
private $hostInfo: IHostInfo,
private $iOSEmulatorServices: Mobile.IiOSSimulatorService
) {
super();
}
public async startLookingForDevices(
options?: Mobile.IDeviceLookingOptions
): Promise<void> {
if (
options &&
options.platform &&
!this.$mobileHelper.isiOSPlatform(options.platform)
) {
return;
}
return this.checkForDevices();
}
private async checkForDevices(): Promise<void> {
if (this.$hostInfo.isDarwin) {
const currentSimulators: Mobile.IiSimDevice[] = await this.$iOSSimResolver.iOSSim.getRunningSimulators();
// Remove old simulators
_(this.cachedSimulators)
.reject((s) =>
_.some(
currentSimulators,
(simulator) =>
simulator &&
s &&
simulator.id === s.id &&
simulator.state === s.state
)
)
.each((s) => this.deleteAndRemoveDevice(s));
// Add new simulators
_(currentSimulators)
.reject((s) =>
_.some(
this.cachedSimulators,
(simulator) =>
simulator &&
s &&
simulator.id === s.id &&
simulator.state === s.state
)
)
.each((s) => this.createAndAddDevice(s));
}
}
public async checkForAvailableSimulators(): Promise<Mobile.IDeviceInfo[]> {
if (!this.$hostInfo.isDarwin) {
return [];
}
const simulators = (await this.$iOSEmulatorServices.getEmulatorImages())
.devices;
const currentSimulators = _.values(this.availableSimulators);
const lostSimulators: Mobile.IDeviceInfo[] = [];
const foundSimulators: Mobile.IDeviceInfo[] = [];
for (const simulator of currentSimulators) {
if (
!_.find(
this.availableSimulators,
(s) => s.imageIdentifier === simulator.imageIdentifier
)
) {
lostSimulators.push(simulator);
}
}
for (const simulator of simulators) {
if (!this.availableSimulators[simulator.imageIdentifier]) {
foundSimulators.push(simulator);
}
}
if (lostSimulators.length) {
this.raiseOnEmulatorImagesLost(lostSimulators);
}
if (foundSimulators.length) {
this.raiseOnEmulatorImagesFound(foundSimulators);
}
return simulators;
}
private createAndAddDevice(simulator: Mobile.IiSimDevice): void {
this.cachedSimulators.push(_.cloneDeep(simulator));
this.addDevice(
this.$injector.resolve(IOSSimulator, { simulator: simulator })
);
}
private deleteAndRemoveDevice(simulator: Mobile.IiSimDevice): void {
_.remove(this.cachedSimulators, (s) => s && s.id === simulator.id);
this.removeDevice(simulator.id);
}
private raiseOnEmulatorImagesFound(simulators: Mobile.IDeviceInfo[]) {
_.forEach(simulators, (simulator) => {
this.availableSimulators[simulator.imageIdentifier] = simulator;
this.emit(EmulatorDiscoveryNames.EMULATOR_IMAGE_FOUND, simulator);
});
}
private raiseOnEmulatorImagesLost(simulators: Mobile.IDeviceInfo[]) {
_.forEach(simulators, (simulator) => {
delete this.availableSimulators[simulator.imageIdentifier];
this.emit(EmulatorDiscoveryNames.EMULATOR_IMAGE_LOST, simulator);
});
}
}
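// Added usage sketch (not part of the original file): a consumer would
// typically resolve the service through the injector and subscribe to the
// emulator image events before triggering a scan, e.g.:
//
// const discovery = injector.resolve("iOSSimulatorDiscovery");
// discovery.on(EmulatorDiscoveryNames.EMULATOR_IMAGE_FOUND,
//     (device: Mobile.IDeviceInfo) => { /* react to the new image */ });
// await discovery.checkForAvailableSimulators();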
injector.register("iOSSimulatorDiscovery", IOSSimulatorDiscovery);
<|file_name|>valueset.py<|end_file_name|><|fim▁begin|>import functools
import itertools
from ..backend_object import BackendObject
def normalize_types(f):
@functools.wraps(f)
def normalizer(self, region, o):
'''
Convert any object to an object that we can process.
'''
if isinstance(o, IfProxy):
return NotImplemented
if isinstance(o, Base):
o = o.model
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
'''
Convert any object to an object that we can process.
'''
if isinstance(o, IfProxy):
return NotImplemented
if isinstance(o, Base):
o = o.model
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class ValueSet(BackendObject):
def __init__(self, name=None, region=None, bits=None, val=None):
self._name = name
if self._name is None:
self._name = 'VS_%d' % vs_id_ctr.next()
self._regions = {}
self._reversed = False
if region is not None and bits is not None and val is not None:
self.set_si(region, StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val))
@property
def name(self):
return self._name
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
return len(self.regions) == 1 and self.regions.values()[0].unique
@property
def bits(self):
return self.size()
@normalize_types
def set_si(self, region, si):
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def items(self):
return self._regions.items()
def size(self):
return len(self)
@normalize_types
def merge_si(self, region, si):
if region not in self._regions:
self.set_si(region, si)
else:
self._regions[region] = self._regions[region].union(si)
@normalize_types
def remove_si(self, region, si):
raise NotImplementedError()
def __repr__(self):
# join every region instead of keeping only the last one
s = ", ".join("%s: %s" % (region, si) for region, si in self._regions.items())
return "(" + s + ")"
def __len__(self):
if self.is_empty:
return 0
return len(self._regions.items()[0][1])
@normalize_types_one_arg
def __add__(self, other):
if type(other) is ValueSet:
# Normally, addition between two addresses doesn't make any sense.
# So we only handle those corner cases
raise NotImplementedError()
else:
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs._regions[region] = si + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
if type(other) is ValueSet:
# It might happen due to imprecision of our analysis (mostly due the absence of contexts)
if self.regions.keys() == other.regions.keys():
# Handle it here
new_vs = ValueSet()
for region, si in self._regions.iteritems():
new_vs._regions[region] = si - other._regions[region]
return new_vs
else:
__import__('ipdb').set_trace()
raise NotImplementedError()
else:
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
if type(other) is ValueSet:
# An address bitwise-and another address? WTF?
assert False
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
new_vs = ValueSet()
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return an SI instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
return ~ (self == other)
def eval(self, n):
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
def copy(self):
vs = ValueSet()
vs._regions = self._regions.copy()
vs._reversed = self._reversed
return vs
def reverse(self):
print "valueset.reverse is not properly implemented"
vs = self.copy()
vs._reversed = not vs._reversed
return vs
@property
def is_empty(self):
return len(self._regions) == 0
def extract(self, high_bit, low_bit):
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs.set_si(region, si.extract(high_bit, low_bit))
return new_vs
def concat(self, b):
new_vs = ValueSet()
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs.set_si(region, si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs.set_si(region, si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
else:
for region, si in self._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
else:
for region, si in self._regions.iteritems():
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
return merged_vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with
:return: True if they are exactly same, False otherwise
"""
if self._name != o._name or self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast_base import Base
from .strided_interval import StridedInterval
from .ifproxy import IfProxy
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError
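# Usage sketch (added; region names and values are hypothetical):
#
# vs = ValueSet(region='global', bits=32, val=0x8000)
# vs.merge_si('stack', StridedInterval(bits=32, stride=4,
#     lower_bound=0x7fff0000, upper_bound=0x7fff0010))
# vs.eval(3) # up to three concrete values drawn across the regions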
<|file_name|>PortletsPanel.js<|end_file_name|><|fim▁begin|>/*global Ext*/
/*jshint strict: false*/
Ext.define('Demo.view.portal.PortletsPanel', {
extend: 'Ext.panel.Panel',
alias: 'widget.portletspanel',
uses: [
'Demo.view.app.Portlet'
]
});
<|file_name|>stock.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from osv import fields, orm, osv
from tools.translate import _
import netsvc
import tools
class stock_location(orm.Model):
_inherit = "stock.location"
_columns = {
'retention_mode': fields.selection(
[('retention', 'Retention Mode'), ('thru', 'Thru mode')],
'Retention Mode',
required=True,
help="In 'Retention mode' the system waits for the\
whole quantity before it is processed.\n"
"In 'Thru mode' the shipped quantity is processed regardless\
of the ordered quantity."
),
}
_defaults = {
'retention_mode': 'retention',
}
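# Added example: if 10 units are ordered and 6 arrive, a 'thru' location
# forwards the 6 units immediately (do_partial splits the chained moves),
# while a 'retention' location keeps them waiting until the remaining 4
# arrive.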
class stock_picking(orm.Model):
_inherit = "stock.picking"
def get_move_chain(self, cr, uid, move_id, context=None, move_obj=False):
'''Recursively get the chained moves
@return list of the chained moves
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
move_tbc = move_obj.browse(cr, uid, move_id, context)
if move_tbc.move_dest_id: # If there is move_dest_id in the chain
move_chain = self.get_move_chain(cr, uid, move_tbc.move_dest_id.id, context)
else:
move_chain = []
move_chain.append(move_tbc)
return move_chain
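# Added note: the chain is built child-first, so the returned list runs
# from the final destination move back to the move passed in; the original
# move is always the last element.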
def copy_pick_chain(self, cr, uid, all_moves, context=None):
'''Copy all the picking related to this order
@return the dictionary of couple: old_pick_id => new_pick_id
'''
new_picks = {}
all_chained_moves = []
sequence_obj = self.pool.get('ir.sequence')
for move in all_moves:
all_chained_moves.extend(self.get_move_chain(cr, uid, move.id, context))
for move in all_chained_moves:
if move.picking_id.id and not new_picks.has_key(move.picking_id.id):
pick_tbc = self.browse(cr, uid, move.picking_id.id, context)
new_note = ((pick_tbc.note if pick_tbc.note else '') + ' Copy of stock.pick[%d].') % move.picking_id.id
new_pick_id = self.copy(cr, uid, move.picking_id.id, {
'state': 'draft',
'note': new_note,
'name': sequence_obj.get(cr, uid, 'stock.picking.%s'%(pick_tbc.type)),
'move_lines' : [],
})
new_picks[move.picking_id.id] = new_pick_id
return new_picks
def copy_move_chain(self, cr, uid, move_id, product_qty, new_picks, context=None, move_obj=False):
'''Recursively copy the chained move until a location in retention mode or the end.
@return id of the new first move.
'''
if not move_obj:
move_obj = self.pool.get('stock.move')
move_tbc = move_obj.browse(cr, uid, move_id, context)
move_dest_id = False
if move_tbc.move_dest_id and move_tbc.location_dest_id.retention_mode == 'thru': # If there is move_dest_id in the chain and the current location is in thru mode, we need to make a copy of that, then use it as new move_dest_id.
move_dest_id = self.copy_move_chain(cr, uid, move_tbc.move_dest_id.id, product_qty, new_picks, context, move_obj)
my_picking_id = (new_picks[move_tbc.picking_id.id] if new_picks.has_key(move_tbc.picking_id.id) else False)
new_note = ((move_tbc.note if move_tbc.note else '') + ' Copy of stock.move[%d].') % move_id
new_move_id = move_obj.copy(cr, uid, move_id, {
'move_dest_id': move_dest_id,
'state': 'waiting',
'note': new_note,
'move_history_ids': False, # Don't inherit child, populate it in next step. The same to next line.
'move_history_ids2': False,
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : my_picking_id,
'price_unit': move_tbc.price_unit,
})
if move_dest_id: # Create the move_history_ids (child) if there is.
move_obj.write(cr, uid, [new_move_id], {'move_history_ids': [(4, move_dest_id)]})
return new_move_id
def update_move_chain_pick(self, cr, uid, move_id, vals, new_picks, context=None):
'''Recursively update the new chained move with the new related picking by the first move id until a location in retention mode or the end.
@return True if ok.
'''
move_obj = self.pool.get('stock.move')
move_tbu = move_obj.browse(cr, uid, move_id, context)
while True:
vals.update(picking_id=new_picks[move_tbu.picking_id.id])
move_obj.write(cr, uid, [move_tbu.id], vals, context)
if not move_tbu.move_dest_id or move_tbu.location_dest_id.retention_mode != 'thru':
break
move_tbu = move_tbu.move_dest_id
return True
def update_move_chain(self, cr, uid, move_id, vals, context=None):
'''Recursively update the old chained move by the first move id until a location in retention mode or the end.
@return True if ok.
'''
ids = [move_id]
move_obj = self.pool.get('stock.move')
move_tbu = move_obj.browse(cr, uid, move_id, context)
while move_tbu.move_dest_id and move_tbu.location_dest_id.retention_mode == 'thru':
ids.append(move_tbu.move_dest_id.id)
move_tbu = move_tbu.move_dest_id
move_obj.write(cr, uid, ids, vals, context)
return True
def isPickNotEmpty(self, cr, uid, pick_id, move_obj, context=None):
cpt = move_obj.search(
cr, uid,
[('picking_id', '=', pick_id)],
context=context, count=True)
return cpt > 0
def check_production_node_move_chain(
self, cr, uid, move_tbc, context=None):
if move_tbc.location_id.usage == 'production' or \
move_tbc.location_dest_id.usage == 'production':
return True
return False
def has_production_mode(self, cr, uid, all_moves, context=None):
for move in all_moves:
if self.check_production_node_move_chain(cr, uid, move, context):
return True
return False
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
like partner_id, address_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picks = False
complete, too_many, too_few, all_moves = [], [], [], []
move_product_qty = {}
prodlot_ids = {}
product_avail = {}
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s' % (move.id), {})
#Commented out in order to process only the stock moves actually provided by the partial picking wizard
#assert partial_data, _('Missing partial picking data for move #%s') % (move.id)
product_qty = partial_data.get('product_qty') or 0.0
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom') or False
product_price = partial_data.get('product_price') or 0.0
product_currency = partial_data.get('product_currency') or False
prodlot_id = partial_data.get('prodlot_id') or False
prodlot_ids[move.id] = prodlot_id
all_moves.append(move)
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if product.id in product_avail:
product_avail[product.id] += qty
else:
product_avail[product.id] = product.qty_available
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id]) \
+ (new_price * qty)) / (product_avail[product.id] + qty)
# Write the field according to price type field
product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
if not too_few:
res = super(stock_picking, self).do_partial(cr, uid, [pick.id], partial_datas, context=context)
else:
if self.has_production_mode(cr, uid, all_moves, context=context):# check if there is a production location in the chain
res[pick.id] = super(stock_picking, self).do_partial(cr, uid, [pick.id], partial_datas, context=context)
#res[pick.id]['warning'] = {'title': _('Warning'), 'message': _('One of your location destinations type is Production. Only the first pick has been split.')}
else:
new_picks = self.copy_pick_chain(cr, uid, all_moves, context)
for move in too_few:
product_qty = move_product_qty[move.id] #actual received quantity
if product_qty != 0:
"""Copy not only one move, but all the moves where the destination location is in THRU MODE """
new_move_id = self.copy_move_chain(cr, uid, move.id, product_qty, new_picks, context)
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
self.update_move_chain(cr, uid, new_move_id, {
'prodlot_id': prodlot_id,
}, context)
"""Update the old moves with the remaining quantity"""
self.update_move_chain(cr, uid, move.id, {
'product_qty': move.product_qty - product_qty,
'product_uos_qty': move.product_qty - product_qty,#TODO: put correct uos_qty
}, context)
else:
                            move_obj.write(cr, uid, [move.id], {
                                'state': 'waiting',
                            })
                    for move in complete:
                        defaults = {}
                        prodlot_id = prodlot_ids.get(move.id)
                        if prodlot_id:
                            defaults.update(prodlot_id=prodlot_id)
if move.location_id.retention_mode == 'thru':
self.update_move_chain_pick(cr, uid, move.id, defaults, new_picks, context)
else:
move_obj.write(cr, uid, [move.id], {'picking_id' : new_picks[move.picking_id.id]}, context)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {}
defaults_1st_move = {
'picking_id' : new_picks[move.picking_id.id],
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
defaults_1st_move.update(prodlot_id=prodlot_id)
move_obj.write(cr, uid, [move.id], defaults_1st_move, context)
if move.location_id.retention_mode == 'thru':
self.update_move_chain_pick(cr, uid, move.id, defaults, new_picks, context)
else:
move_obj.write(cr, uid, [move.id], {'picking_id' : new_picks[move.picking_id.id]}, context)
# At first we confirm the new pickings (if necessary)
for old_pick, new_pick in new_picks.iteritems():
# check if the old pick is empty
if not self.isPickNotEmpty(cr, uid, old_pick, move_obj, context):
self.unlink(cr, uid, [old_pick])
# check if the new pick is not empty
if self.isPickNotEmpty(cr, uid, new_pick, move_obj, context):
if self.isPickNotEmpty(cr, uid, old_pick, move_obj, context):
self.write(cr, uid, [old_pick], {'backorder_id': new_pick})
wf_service.trg_validate(uid, 'stock.picking', new_pick, 'button_confirm', cr)
# Alex commented this, fix task:4547
# self.action_move(cr, uid, [new_pick])
else:
self.unlink(cr, uid, [new_pick])
#pick.refresh() <= Works on 6.1<|fim▁hole|> for move in pick_hack.backorder_id.move_lines:
move_obj.action_assign(cr, uid, [move.id])
# The pick is set as "confirmed" then "done"
wf_service.trg_validate(uid, 'stock.picking', new_picks[pick.id], 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
#pick.refresh() <= Works on 6.1
# Finally we set the next pick as "assigned"
pick_hack = self.browse(cr, uid, pick.id, context=context)
for move in pick_hack.backorder_id.move_lines:
if move.move_dest_id.picking_id and self.test_assigned(cr, uid, [move.move_dest_id.picking_id.id]):
self.action_assign_wkf(cr, uid, [move.move_dest_id.picking_id.id], context=context)
res[pick.id] = {'delivered_picking': new_picks[pick.id] or False}
return res
class stock_move(orm.Model):
_name = "stock.move"
_inherit = "stock.move"
def copy_move_chain(self, cr, uid, move_id, product_qty, context=None):
'''Recursively copy the chained move until a location in retention mode or the end.
@return id of the new first move.
'''
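        # Sketch of the recursion: for a chain A -> B -> C where the destination
        # locations of A and B are in 'thru' retention mode, the call copies C
        # first, then B (pointing at C's copy), then finally A.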
move_tbc = self.browse(cr, uid, move_id, context)
move_dest_id = False
if move_tbc.move_dest_id and move_tbc.location_dest_id.retention_mode == 'thru': # If there is move_dest_id in the chain and the current location is in thru mode, we need to make a copy of that, then use it as new move_dest_id.
move_dest_id = self.copy_move_chain(cr, uid, move_tbc.move_dest_id.id, product_qty, context)
        new_note = (move_tbc.note or '') + ' Copy of stock.move[%d].' % move_id
new_move_id = self.copy(cr, uid, move_id, {
'move_dest_id': move_dest_id,
'state': 'waiting',
'note': new_note,
'move_history_ids': False, # Don't inherit child, populate it in next step. The same to next line.
'move_history_ids2': False,
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : move_tbc.picking_id.id,
'price_unit': move_tbc.price_unit,
'auto_validate': False
})
if move_dest_id: # Create the move_history_ids (child) if there is.
self.write(cr, uid, [new_move_id], {'move_history_ids': [(4, move_dest_id)]})
return new_move_id
def update_move_chain(self, cr, uid, move_id, vals, context=None):
'''Recursively update the chained move by the first move id until a location in retention mode or the end.
@return True if ok.
'''
if isinstance(move_id, list):
move_id = move_id[0]
ids = [move_id]
move_tbu = self.browse(cr, uid, move_id, context)
while move_tbu.move_dest_id and move_tbu.location_dest_id.retention_mode == 'thru':
ids.append(move_tbu.move_dest_id.id)
move_tbu = move_tbu.move_dest_id
self.write(cr, uid, ids, vals, context)
return True
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial pickings and moves done.
@param partial_datas: Dictionary containing details of partial picking
like partner_id, address_id, delivery_date, delivery
moves with product_id, product_qty, uom
"""
res = {}
picking_obj = self.pool.get('stock.picking')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
complete, too_many, too_few = [], [], []
move_product_qty = {}
prodlot_ids = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), False)
assert partial_data, _('Missing partial picking data for move #%s') % (move.id)
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_ids[move.id] = partial_data.get('prodlot_id')
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context)[product.id]
new_std_price = ((amount_unit * product.qty_available)\
+ (new_price * qty))/(product.qty_available + qty)
product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
self.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency,
})
        for move in too_few:
            product_qty = move_product_qty[move.id]
            if product_qty != 0:
                # Copy not only one move, but all the moves whose destination
                # location is in THRU mode.
                new_move_id = self.copy_move_chain(cr, uid, move.id, product_qty, context)
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    # Apply the chosen production lot to the whole new chain.
                    self.update_move_chain(cr, uid, new_move_id, {
                        'prodlot_id': prodlot_id,
                    }, context)
                complete.append(self.browse(cr, uid, new_move_id))
                # Update not only one move, but all the moves whose destination
                # location is in THRU mode.
                self.update_move_chain(cr, uid, [move.id], {
                    'product_qty': move.product_qty - product_qty,
                    'product_uos_qty': move.product_qty - product_qty,
                }, context)
else:
                self.write(cr, uid, [move.id], {
                    'state': 'waiting',
                })
for move in too_many:
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty,
'product_uos_qty': move.product_qty,
})
complete.append(move)
for move in complete:
if prodlot_ids.get(move.id):
self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
self.action_done(cr, uid, [move.id], context=context)
if move.picking_id.id :
# TOCHECK : Done picking if all moves are done
cr.execute("""
SELECT move.id FROM stock_picking pick
RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
WHERE pick.id = %s""",
('done', move.picking_id.id))
res = cr.fetchall()
if len(res) == len(move.picking_id.move_lines):
picking_obj.action_move(cr, uid, [move.picking_id.id])
wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
return [move.id for move in complete]<|fim▁end|> | # Here we set the moves as "assigned"
pick_hack = self.browse(cr, uid, pick.id, context=context) |
<|file_name|>RS_HUD_OptimizedMobile.js<|end_file_name|><|fim▁begin|>//================================================================
// RS_HUD_OptimizedMobile.js
// ---------------------------------------------------------------
// The MIT License
// Copyright (c) 2015 biud436
// ---------------------------------------------------------------
// Free for commercial and non commercial use.
//================================================================
/*:
* RS_HUD_OptimizedMobile.js
 * @plugindesc (v1.0.1) This plugin draws a HUD that displays the HP, MP, EXP, and level of each party member.
*
* @author biud436
*
* @param --- Image Name
*
* @param Texture Atlas
* @parent --- Image Name
 * @desc Imports the texture atlas.
* @default config
* @require 1
* @dir img/rs_hud/
* @type file
*
* @param EXP Gauge
* @parent --- Image Name
 * @desc Specifies the image file named 'exr' to import.
* @default exr
*
* @param Empty Gauge
* @parent --- Image Name
 * @desc Specifies the image file named 'gauge' to import.
* @default gauge
*
* @param HP Gauge
* @parent --- Image Name
 * @desc Specifies the image file named 'hp' to import.
* @default hp
*
* @param MP Gauge
* @parent --- Image Name
 * @desc Specifies the image file named 'mp' to import.
* @default mp
*
* @param HUD Background
* @parent --- Image Name
 * @desc Specifies the image file named 'hud_window_empty' to import.
* @default hud_window_empty
*
* @param Masking
* @parent --- Image Name
 * @desc Specifies the image file named 'masking' to import.
* @default masking
*
* @param --- Image Custom Position
*
* @param Face Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the face sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 0, 0, true
*
* @param HP Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the hp sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 160, 41, true
*
* @param MP Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the mp sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 160, 67, true
*
* @param EXP Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the exp sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 83, 89, true
*
* @param HP Text Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the hp text sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 160, 51, true
*
* @param MP Text Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the mp text sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 160, 77, true
*
* @param Level Text Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the level text sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 60, 78, true
*
* @param EXP Text Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the exp text sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 120.5, 91, true
*
* @param Name Text Position
* @parent --- Image Custom Position
* @desc Specifies the properties of the name text sprite by x, y, visible
 * (Drawn at a position relative to the background sprite)
* @default 54, 51, false
*
 * @param --- Normal
*
* @param Width
 * @parent --- Normal
* @desc Do not change this when you are using the default sprite batch.
* (default : 317)
* @default 317
*
* @param Height
 * @parent --- Normal
* @desc Do not change this when you are using the default sprite batch.
* (default : 101)
* @default 101
*
* @param Margin
 * @parent --- Normal
* @type number
* @min 0
* @desc Sets the margin to the HUD borders.
* @default 0
*
* @param Gaussian Blur
 * @parent --- Normal
* @type boolean
* @desc Sets the Gaussian Blur.
* @default true
*
* @param Show
 * @parent --- Normal
* @type boolean
* @desc Sets the visible status. (default : true)
* @default true
*
* @param Opacity
 * @parent --- Normal
* @type number
* @min 0
* @max 255
* @desc Sets the opacity.
* @default 255
*
* @param Arrangement
 * @parent --- Normal
* @type string[]
* @desc Create an array to set the anchor of each HUD.
* @default ["LeftTop", "LeftBottom", "RightTop", "RightBottom"]
*
* @param Anchor
 * @parent --- Normal
 * @desc If the anchor is not found, the HUD falls back to this anchor.
* @default LeftTop
*
* @param preloadImportantFaces
 * @parent --- Normal
* @type string[]
 * @desc Allows you to preload the base face chips.
 * (If you do not set this parameter, it can cause errors in the game.)
* @default ["Actor1", "Actor2", "Actor3"]
*
* @param Battle Only
 * @parent --- Normal
* @type boolean
* @desc If you want to use the HUD only in battles.
* (default : false)
* @default false
*
* @param Show Comma
 * @parent --- Normal
* @type boolean
 * @desc Indicates whether numeric values are displayed with commas
 * every three digits.
* @default false
*
* @param Max Exp Text
 * @parent --- Normal
 * @desc The EXP text displayed when an actor is at the maximum level.
* @default ------/------
*
* @param Max Members
 * @parent --- Normal
* @type number
* @min 1
 * @desc Specifies the maximum number of party members that are displayed within the game screen.
* @default 4
*
* @param --- Font
*
* @param Chinese Font
* @parent --- Font
* @desc Specifies the desired fonts
* @default SimHei, Heiti TC, sans-serif
*
* @param Korean Font
* @parent --- Font
* @desc Specifies the desired fonts
* @default NanumGothic, Dotum, AppleGothic, sans-serif
*
* @param Standard Font
* @parent --- Font
 * @desc Specifies the font face loaded via a CSS file from the ./fonts folder.
* @default GameFont
*
* @param Level Text Size
* @parent --- Font
* @desc Specify the text size for levels.
* @default 24
*
* @param HP Text Size
* @parent --- Font
* @desc Specify the text size for HP.
* @default 12
*
* @param MP Text Size
* @parent --- Font
* @desc Specify the text size for MP.
* @default 12
*
* @param EXP Text Size
* @parent --- Font
* @desc Specify the text size for EXP.
* @default 12
*
* @param Name Text Size
* @parent --- Font
* @desc Specify the text size for names.
* @default 12
*
* @param --- Text Color
*
* @param HP Color
* @parent --- Text Color
* @desc Specify the text color for HP.
* @default #ffffff
*
* @param MP Color
* @parent --- Text Color
* @desc Specify the text color for MP.
* @default #ffffff
*
* @param EXP Color
* @parent --- Text Color
* @desc Specify the text color for EXP.
* @default #ffffff
*
* @param Level Color
* @parent --- Text Color
* @desc Specify the text color for levels.
* @default #ffffff
*
* @param Name Color
* @parent --- Text Color
* @desc Specify the text color for names.
* @default #ffffff
*
* @param --- Text Outline Color
*
* @param HP Outline Color
* @parent --- Text Outline Color
* @desc Specify the text outline color for HP.
* @default rgba(0, 0, 0, 0.5)
*
* @param MP Outline Color
* @parent --- Text Outline Color
* @desc Specify the text outline color for MP.
* @default rgba(0, 0, 0, 0.5)
*
* @param EXP Outline Color
* @parent --- Text Outline Color
* @desc Specify the text outline color for EXP.
* @default rgba(0, 0, 0, 0.5)
*
* @param Level Outline Color
* @parent --- Text Outline Color
* @desc Specify the text outline color for levels.
* @default rgba(0, 0, 0, 0.5)
*
* @param Name Outline Color
* @parent --- Text Outline Color
* @desc Specify the text outline color for names.
* @default rgba(0, 0, 0, 0.5)
*
* @param --- Text Outline Width
*
* @param HP Outline Width
* @parent --- Text Outline Width
* @desc Specify the maximum width of a text border line for HP.
* @default 4
*
* @param MP Outline Width
* @parent --- Text Outline Width
* @desc Specify the maximum width of a text border line for MP.
* @default 4
*
* @param EXP Outline Width
* @parent --- Text Outline Width
* @desc Specify the maximum width of a text border line for EXP.
* @default 4
*
* @param Level Outline Width
* @parent --- Text Outline Width
* @desc Specify the maximum width of a text border line for levels.
* @default 4
*
* @param Name Outline Width
* @parent --- Text Outline Width
* @desc Specify the maximum width of a text border line for names.
* @default 4
*
* @param --- Custom Font
*
* @param Using Custom Font
* @parent --- Custom Font
* @type boolean
 * @desc Specify whether the custom font is used (default : false)
* YES - true / NO - false
* @default false
*
* @param Custom Font Name
* @parent --- Custom Font
* @desc Specify the name of the custom font
* @default NanumBrush
*
* @param Custom Font Src
* @parent --- Custom Font
* @desc Specify the path of the font file from a game project folder
* @default fonts/NanumBrush.ttf
*
* @param --- Custom HUD Anchor
*
* @param Custom Pos 1
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
 * (Please refer to the Custom Positions section of the help)
* @default 0, (H * 0) + PD
*
* @param Custom Pos 2
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default 0, (H * 1) + PD
*
* @param Custom Pos 3
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default 0, (H * 2) + PD
*
* @param Custom Pos 4
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default 0, (H * 3) + PD
*
* @param Custom Pos 5
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default 0, (H * 4) + PD
*
* @param Custom Pos 6
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default W + PD, (H * 0) + PD
*
* @param Custom Pos 7
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default W + PD, (H * 1) + PD
*
* @param Custom Pos 8
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default W + PD, (H * 2) + PD
*
* @param Custom Pos 9
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default W + PD, (H * 3) + PD
*
* @param Custom Pos 10
* @parent --- Custom HUD Anchor
* @desc Predefined Variables : W, H, PD, BW, BH
* (Please refer to the help section)
* @default W + PD, (H * 4) + PD
*
* @help
* =============================================================================
* Installations
* =============================================================================
*
* Download the resources and place them in your img/rs_hud folder.
 * All the resources can be downloaded from the following links.
* img/rs_hud/config.json : https://github.com/biud436/MV/raw/master/HUD/config.json
* img/rs_hud/config.png : https://github.com/biud436/MV/raw/master/HUD/config.png
*
* Github Link : https://github.com/biud436/MV/blob/master/HUD/RS_HUD_OptimizedMobile.js
*
* =============================================================================
* Custom Positions
* =============================================================================
*
 * To place a hud correctly, you need to know which predefined variables are
 * currently available. The following predefined variables can be used when
 * specifying a parameter named 'Custom Pos', so you can quickly set up the
 * position of each hud.
*
* Predefined Variables :
* W - 'W' is the same as a parameter named 'Width' in Plugin Manager.
 * H - 'H' is the same as a parameter named 'Height' in Plugin Manager.
 * PD - 'PD' is the same as a parameter named 'Margin' in Plugin Manager.
* BW - 'BW' is the same as a maximum width of the game canvas.
* BH - 'BH' is the same as a maximum height of the game canvas.
*
 * Each sprite is drawn at a position relative to the background sprite.
 * Therefore, these custom positions are quite important values.
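 *
 * For example, assuming the default hud width and height, the following
 * 'Custom Pos' value anchors a hud to the bottom-right corner of the screen :
 *
 * BW - W - PD, BH - H - PD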
*
* =============================================================================
* Notetags
* =============================================================================
*
 * Insert the following notetag into the note box of the map properties window.
 * <DISABLE_HUD> : Use this notetag in a map that should not create the huds.
*
* =============================================================================
* Script Calls
* =============================================================================
*
* -----------------------------------------------------------------------------
* Set Opacity
* -----------------------------------------------------------------------------
* Sets the opacity of the HUD to x.
*
* $gameHud.opacity = x;
*
 * Here x is a number between 0 and 255.
*
* For example :
* $gameHud.opacity = 128;
*
* -----------------------------------------------------------------------------
* Set visibility
* -----------------------------------------------------------------------------
 * This property changes the visibility of the HUD.
*
* $gameHud.show = true/false;
*
* For example :
* $gameHud.show = false;
*
* -----------------------------------------------------------------------------
* Refresh Texts
* -----------------------------------------------------------------------------
 * In general, text and gauge sprites are refreshed only when a refresh is
 * requested, so they are not updated on every frame. Therefore, if you need
 * to refresh all texts immediately, use the call below.
*
* $gameTemp.notifyHudTextRefresh();
*
* -----------------------------------------------------------------------------
* Clear and create all huds
* -----------------------------------------------------------------------------
 * If you need to immediately recreate all huds, use the call below.
*
* $gameTemp.notifyHudRefresh();
* =============================================================================
* Plugin Commands
* =============================================================================
*
* RS_HUD Opacity x : This command sets up the opacity for all hud elements.
* 'x' is a number value between 0 and 255.
*<|fim▁hole|> * RS_HUD Visible true/false : This command sets up whether it displays all containers for HUD.
* 'RS_HUD Visible true' sets its visibility to true.
* 'RS_HUD Visible false' sets its visibility to false.
*
 * RS_HUD import file_name : This command imports the plugin parameters from a JSON file in your data folder.
 * RS_HUD export file_name : This command exports the plugin parameters as a JSON file to your data folder.
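 *
 * For example ('my_hud' is just an example file name) :
 * RS_HUD Opacity 128
 * RS_HUD Visible false
 * RS_HUD import my_hud      (this reads data/my_hud.json)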
*
* =============================================================================
* Change Log
* =============================================================================
* 2018.03.16 (v1.0.0) - First Release (forked in RS_HUD_4m)
* 2018.05.09 (v1.0.1) - Supported a face image that is made using SumRndmDde's CharacterCreatorEX plugin.
*/
var Imported = Imported || {};
Imported.RS_HUD_OptimizedMobile = '1.0.1';
var $gameHud = null;
var RS = RS || {};
RS.HUD = RS.HUD || {};
RS.HUD.param = RS.HUD.param || {};
(function() {
if(Utils.RPGMAKER_VERSION < '1.5.0') {
    console.warn('Note that the RS_HUD_OptimizedMobile plugin can only be used in RMMV v1.5.0 or above.');
return;
}
var parameters = PluginManager.parameters('RS_HUD_OptimizedMobile');
// Image Settings
RS.HUD.param.imgEXP = String(parameters['EXP Gauge'] || 'exr');
RS.HUD.param.imgEmptyGauge = String(parameters['Empty Gauge'] || 'gauge');
RS.HUD.param.imgHP = String(parameters['HP Gauge'] || 'hp');
RS.HUD.param.imgMP = String(parameters['MP Gauge'] || 'mp');
RS.HUD.param.imgEmptyHUD = String(parameters['HUD Background'] || 'hud_window_empty');
RS.HUD.param.imgMasking = String(parameters['Masking'] || 'masking');
// Image Position
RS.HUD.loadImagePosition = function (szRE) {
    var target = szRE.match(/(.*),(.*),(.*)/i) || [];
    var x = parseFloat(target[1]);
    var y = parseFloat(target[2]);
    var visible = String(target[3]).contains('true');
    return {'x': x, 'y': y, 'visible': visible};
};
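  // For example, RS.HUD.loadImagePosition('160, 41, true') evaluates to
  // {x: 160, y: 41, visible: true}.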
RS.HUD.loadRealNumber = function (paramName, val) {
var value = Number(parameters[paramName]);
switch (typeof(value)) {
case 'object': case 'undefined':
value = val;
break;
}
return value;
};
RS.HUD.param.ptFace = RS.HUD.loadImagePosition(parameters['Face Position'] || '0, 0, true');
RS.HUD.param.ptHP = RS.HUD.loadImagePosition(parameters['HP Position'] || '160, 43, true');
RS.HUD.param.ptMP = RS.HUD.loadImagePosition(parameters['MP Position'] || '160, 69, true');
RS.HUD.param.ptEXP = RS.HUD.loadImagePosition(parameters['EXP Position'] || '83, 91, true');
RS.HUD.param.ptHPText = RS.HUD.loadImagePosition(parameters['HP Text Position'] || '160, 53, true');
RS.HUD.param.ptMPText = RS.HUD.loadImagePosition(parameters['MP Text Position'] || '160, 79, true');
RS.HUD.param.ptLevelText = RS.HUD.loadImagePosition(parameters['Level Text Position'] || '60, 80, true');
RS.HUD.param.ptEXPText = RS.HUD.loadImagePosition(parameters['EXP Text Position'] || '120.5, 93, true');
RS.HUD.param.ptNameText = RS.HUD.loadImagePosition(parameters['Name Text Position'] || '54, 53, true');
// Normal Settings
RS.HUD.param.nWidth = RS.HUD.loadRealNumber('Width', 317);
RS.HUD.param.nHeight = RS.HUD.loadRealNumber('Height', 101);
RS.HUD.param.nPD = RS.HUD.loadRealNumber('Margin', 0);
RS.HUD.param.blurProcessing = Boolean(parameters['Gaussian Blur'] === "true");
RS.HUD.param.bShow = Boolean(parameters['Show'] ==="true");
RS.HUD.param.nOpacity = RS.HUD.loadRealNumber('Opacity', 255);
RS.HUD.param.szAnchor = String(parameters['Anchor'] || "LeftTop");
RS.HUD.param.arrangement = eval(parameters['Arrangement']);
  RS.HUD.param.preloadImportantFaces = eval(parameters['preloadImportantFaces'] || '["Actor1", "Actor2", "Actor3"]');
RS.HUD.param.battleOnly = Boolean(parameters['Battle Only'] === "true");
RS.HUD.param.showComma = Boolean(parameters['Show Comma'] === 'true');
RS.HUD.param.maxExpText = String(parameters['Max Exp Text'] || "------/------");
RS.HUD.param.nMaxMembers = parseInt(parameters["Max Members"] || 4);
RS.HUD.getDefaultHUDAnchor = function () {
var anchor = {
"LeftTop": {x: RS.HUD.param.nPD, y: RS.HUD.param.nPD},
"LeftBottom": {x: RS.HUD.param.nPD, y: Graphics.boxHeight - RS.HUD.param.nHeight - RS.HUD.param.nPD},
"RightTop": {x: Graphics.boxWidth - RS.HUD.param.nWidth - RS.HUD.param.nPD, y: RS.HUD.param.nPD},
"RightBottom": {x: Graphics.boxWidth - RS.HUD.param.nWidth - RS.HUD.param.nPD, y: Graphics.boxHeight - RS.HUD.param.nHeight - RS.HUD.param.nPD}
};
return anchor;
};
// Font Settings
RS.HUD.param.chineseFont = String(parameters['Chinese Font'] || 'SimHei, Heiti TC, sans-serif');
RS.HUD.param.koreanFont = String(parameters['Korean Font'] || 'NanumGothic, Dotum, AppleGothic, sans-serif');
RS.HUD.param.standardFont = String(parameters['Standard Font'] || 'GameFont');
// Text Size
RS.HUD.param.levelTextSize = RS.HUD.loadRealNumber('Level Text Size', 12);
RS.HUD.param.hpTextSize = RS.HUD.loadRealNumber('HP Text Size', 12);
RS.HUD.param.mpTextSize = RS.HUD.loadRealNumber('MP Text Size', 12);
RS.HUD.param.expTextSize = RS.HUD.loadRealNumber('EXP Text Size', 12);
RS.HUD.param.nameTextSize = RS.HUD.loadRealNumber('Name Text Size', 12);
// Text Color
RS.HUD.param.szHpColor = String(parameters['HP Color'] || '#ffffff');
RS.HUD.param.szMpColor = String(parameters['MP Color'] || '#ffffff');
RS.HUD.param.szExpColor = String(parameters['EXP Color'] || '#ffffff');
RS.HUD.param.szLevelColor = String(parameters['Level Color'] || '#ffffff');
RS.HUD.param.szNameColor = String(parameters['Name Color'] || '#ffffff');
// Text Outline Color
RS.HUD.param.szHpOutlineColor = String(parameters['HP Outline Color'] || 'rgba(0, 0, 0, 0.5)');
RS.HUD.param.szMpOutlineColor = String(parameters['MP Outline Color'] || 'rgba(0, 0, 0, 0.5)');
RS.HUD.param.szExpOutlineColor = String(parameters['EXP Outline Color'] || 'rgba(0, 0, 0, 0.5)');
RS.HUD.param.szLevelOutlineColor = String(parameters['Level Outline Color'] || 'rgba(0, 0, 0, 0.5)');
RS.HUD.param.szNameOutlineColor = String(parameters['Name Outline Color'] || 'rgba(0, 0, 0, 0.5)');
// Text Outline Width
RS.HUD.param.szHpOutlineWidth = RS.HUD.loadRealNumber('HP Outline Width', 4);
RS.HUD.param.szMpOutlineWidth = RS.HUD.loadRealNumber('MP Outline Width', 4);
RS.HUD.param.szExpOutlineWidth = RS.HUD.loadRealNumber('EXP Outline Width', 4);
RS.HUD.param.szLevelOutlineWidth = RS.HUD.loadRealNumber('Level Outline Width', 4);
RS.HUD.param.szNameOutlineWidth = RS.HUD.loadRealNumber('Name Outline Width', 4);
// Custom Font
RS.HUD.param.bUseCustomFont = Boolean(parameters['Using Custom Font'] === 'true');
RS.HUD.param.szCustomFontName = String(parameters['Custom Font Name'] || 'GameFont' );
RS.HUD.param.szCustomFontSrc = String(parameters['Custom Font Src'] || 'fonts/mplus-1m-regular.ttf');
// Custom HUD Anchor
RS.HUD.param.ptCustormAnchor = [];
RS.HUD.param.isCurrentBattleShowUp = false;
RS.HUD.param.isPreviousShowUp = false;
RS.HUD.param.init = false;
PIXI.loader
.add("config", "img/rs_hud/config.json")
.load(onAssetsLoaded);
function onAssetsLoaded() {
RS.HUD.param.init = true;
};
RS.HUD.loadCustomPosition = function (szRE) {
var W = RS.HUD.param.nWidth;
var H = RS.HUD.param.nHeight;
var PD = RS.HUD.param.nPD;
var BW = Graphics.boxWidth || 816;
var BH = Graphics.boxHeight || 624;
var x = eval('[' + szRE + ']');
if(x instanceof Array) return new Point(x[0], x[1]);
return new Point(0, 0);
};
// Opacity and Tone Glitter Settings
var nOpacityEps = 5;
var nOpacityMin = 64;
var nFaceDiameter = 96;
var nHPGlitter = 0.4;
var nMPGlitter = 0.4;
var nEXPGlitter = 0.7;
var defaultTemplate = 'hud_default_template.json';
//----------------------------------------------------------------------------
// Data Imports & Exports
//
//
RS.HUD.localFilePath = function (fileName) {
if(!Utils.isNwjs()) return '';
var path, base;
path = require('path');
base = path.dirname(process.mainModule.filename);
return path.join(base, 'data/') + (fileName || defaultTemplate);
};
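  // For example, RS.HUD.localFilePath('my_hud.json') resolves to
  // <project>/data/my_hud.json under NW.js ('my_hud.json' is just an example);
  // in a web browser it returns ''.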
RS.HUD.exportData = function (fileName) {
var fs, data, filePath;
if(!Utils.isNwjs()) return false;
if(!RS.HUD.param) return false;
fs = require('fs');
data = JSON.stringify(RS.HUD.param);
filePath = RS.HUD.localFilePath(fileName);
fs.writeFile(filePath, data, 'utf8', function (err) {
if (err) throw err;
});
};
RS.HUD.loadData = function (data) {
var params = Object.keys(RS.HUD.param);
data = JSON.parse(data);
params.forEach(function (name) {
RS.HUD.param[name] = data[name];
}, this);
setTimeout(function () {
$gameTemp.notifyHudRefresh();
}, 0);
};
RS.HUD.importData = function (fileName) {
if(!Utils.isNwjs()) return false;
var fs = require('fs');
var filePath = RS.HUD.localFilePath(fileName);
var data = fs.readFileSync(filePath, { encoding: 'utf8' });
RS.HUD.loadData(data);
};
RS.HUD.importDataWithAjax = function (fileName) {
var xhr = new XMLHttpRequest();
var self = RS.HUD;
var url = './data/' + (fileName || defaultTemplate);
xhr.open('GET', url);
xhr.onload = function() {
if(xhr.status < 400) {
RS.HUD.loadData(xhr.responseText.slice(0));
}
}
xhr.send();
};
RS.HUD.loadPicture = function (filename) {
var bitmap = ImageManager.reservePicture(filename);
return bitmap;
};
RS.HUD.loadFace = function (filename) {
var bitmap = ImageManager.reserveFace(filename);
return bitmap;
};
//----------------------------------------------------------------------------
// Bitmap
//
//
Bitmap.prototype.drawClippingImage = function(bitmap, maskImage , _x, _y, _sx, _sy) {
var context = this._context;
context.save();
context.drawImage(maskImage._canvas, _x, _y, nFaceDiameter, nFaceDiameter);
context.globalCompositeOperation = 'source-atop';
context.drawImage(bitmap._canvas, _sx, _sy, 144, 144, 0, 0, nFaceDiameter, nFaceDiameter);
context.restore();
this._setDirty();
};
Bitmap.prototype.drawClippingImageNonBlur = function(bitmap, _x, _y, _sx, _sy) {
var context = this._context;
context.save();
context.beginPath();
context.arc(_x + 45, _y + 45 , 45, 0, Math.PI * 2, false);
context.clip();
context.drawImage(bitmap._canvas, _sx, _sy, 144, 144, 0, 0, nFaceDiameter, nFaceDiameter);
context.restore();
this._setDirty();
};
//----------------------------------------------------------------------------
// Game_Temp
//
//
Game_Temp.prototype.notifyHudTextRefresh = function() {
if($gameHud) $gameHud.updateText();
};
Game_Temp.prototype.notifyHudRefresh = function() {
if($gameHud) $gameHud.refresh();
};
//----------------------------------------------------------------------------
// Game_System ($gameSystem)
//
//
var _alias_Game_System_initialize = Game_System.prototype.initialize;
Game_System.prototype.initialize = function() {
_alias_Game_System_initialize.call(this);
this._rs_hud = this._rs_hud || {};
this._rs_hud.show = this._rs_hud.show || RS.HUD.param.bShow;
this._rs_hud.opacity = this._rs_hud.opacity || RS.HUD.param.nOpacity;
};
//----------------------------------------------------------------------------
// Game_Battler
//
//
var alias_Game_Battler_refresh = Game_Battler.prototype.refresh;
Game_Battler.prototype.refresh = function() {
alias_Game_Battler_refresh.call(this);
$gameTemp.notifyHudTextRefresh();
};
//----------------------------------------------------------------------------
// Game_Actor
//
//
Game_Actor.prototype.relativeExp = function () {
if(this.isMaxLevel()) {
return this.expForLevel(this.maxLevel());
} else {
return this.currentExp() - this.currentLevelExp();
}
};
Game_Actor.prototype.relativeMaxExp = function () {
if(!this.isMaxLevel()) {
return this.nextLevelExp() - this.currentLevelExp();
} else {
return this.expForLevel(this.maxLevel());
}
};
//----------------------------------------------------------------------------
// Game_Party
//
//
var alias_Game_Party_swapOrder = Game_Party.prototype.swapOrder;
Game_Party.prototype.swapOrder = function(index1, index2) {
alias_Game_Party_swapOrder.call(this, index1, index2);
$gameTemp.notifyHudRefresh();
};
//----------------------------------------------------------------------------
// TextData
//
//
function TextData() {
this.initialize.apply(this, arguments);
}
TextData.prototype = Object.create(Sprite.prototype);
TextData.prototype.constructor = TextData;
TextData.prototype.initialize = function(bitmap, func, params) {
Sprite.prototype.initialize.call(this, bitmap);
this.setCallbackFunction(func);
this.updateTextLog();
this._params = params;
this.requestUpdate();
};
TextData.prototype.setCallbackFunction = function (cbFunc) {
this._callbackFunction = cbFunc;
};
TextData.prototype.updateTextLog = function () {
this._log = this._callbackFunction.call();
};
TextData.prototype.startCallbackFunction = function () {
this._callbackFunction.call(this);
};
TextData.prototype.getTextProperties = function (n) {
return this._params[n];
};
TextData.prototype.drawDisplayText = function () {
this.defaultFontSettings();
this.bitmap.drawText(this._callbackFunction(this), 0, 0, 120, this._params[0] + 8, 'center');
};
TextData.prototype.isRefresh = function () {
var currentText = this._callbackFunction();
return currentText.localeCompare(this._log) !== 0;
};
TextData.prototype.clearTextData = function () {
this.bitmap.clear();
};
TextData.prototype.requestUpdate = function () {
this.clearTextData();
this.drawDisplayText();
this.updateTextLog();
};
TextData.prototype.standardFontFace = function() {
if(RS.HUD.param.bUseCustomFont) {
return RS.HUD.param.szCustomFontName;
} else {
if (navigator.language.match(/^zh/)) {
return RS.HUD.param.chineseFont;
} else if (navigator.language.match(/^ko/)) {
return RS.HUD.param.koreanFont;
} else {
return RS.HUD.param.standardFont;
}
}
};
TextData.prototype.defaultFontSettings = function() {
var param = this._params;
this.bitmap.fontFace = this.standardFontFace();
this.bitmap.fontSize = param[0];
this.bitmap.textColor = param[1];
this.bitmap.outlineColor = param[2];
this.bitmap.outlineWidth = param[3];
};
//----------------------------------------------------------------------------
// HUD
//
//
function HUD() {
this.initialize.apply(this, arguments);
};
//----------------------------------------------------------------------------
// RS_HudLayer
//
//
function RS_HudLayer() {
this.initialize.apply(this, arguments);
};
RS_HudLayer.prototype = Object.create(Sprite.prototype);
RS_HudLayer.prototype.constructor = RS_HudLayer;
RS_HudLayer.prototype.initialize = function(bitmap) {
Sprite.prototype.initialize.call(this, bitmap);
this.alpha = 0;
this.createItemLayer();
};
RS_HudLayer.prototype.createItemLayer = function () {
this._items = new Sprite();
this._items.setFrame(0, 0, Graphics.boxWidth, Graphics.boxHeight);
this.addChild(this._items);
};
RS_HudLayer.prototype.drawAllHud = function() {
var allHud = this._items;
var items = RS.HUD.param.arrangement;
// This removes any drawing objects that have already been created.
if(allHud.children.length > 0) {
allHud.removeChildren(0, allHud.children.length);
}
items.forEach(function(item, index){
// This code runs only when there is a party member at a specific index.
if(!!$gameParty.members()[index]) {
if(item !== null) allHud.addChild(new HUD({szAnchor: item, nIndex: index}));
}
}, this);
// It sorts objects by party number.
this.sort();
this.show = $gameSystem._rs_hud.show;
this.opacity = $gameSystem._rs_hud.opacity;
};
RS_HudLayer.prototype.update = function () {
var members = $gameParty.members();
this.children.forEach(function(child, idx) {
if (child.update && members[idx]) {
child.update();
}
});
};
RS_HudLayer.prototype.sort = function() {
var allHud = this._items;
var array = allHud.children;
allHud.children = array.sort(function(a, b) {
return a._memberIndex - b._memberIndex;
});
}
RS_HudLayer.prototype.refresh = function() {
var allHud = this._items;
allHud.children.forEach(function(i) {
allHud.removeChild(i);
}, this);
this.drawAllHud();
this.show = $gameSystem._rs_hud.show;
};
RS_HudLayer.prototype.updateText = function() {
var allHud = this._items;
allHud.children.forEach(function(i) {
i.updateText();
}, this);
};
RS_HudLayer.prototype.updateFrame = function () {
var allHud = this._items;
allHud.children.forEach(function(i) {
i.paramUpdate();
}, this);
};
RS_HudLayer.prototype.remove = function(index) {
var self = this;
setTimeout(function() {
while($gameParty.size() !== self._items.children.length) {
self.drawAllHud();
}
}, 0);
};
Object.defineProperty(RS_HudLayer.prototype, 'show', {
get: function() {
return this.visible;
},
set: function(value) {
this.visible = value;
$gameSystem._rs_hud.show = value;
},
});
Object.defineProperty(RS_HudLayer.prototype, 'opacity', {
get: function() {
return Math.floor(this.alpha * 255);
},
set: function(value) {
      this.alpha = value * 0.00392156862745098; // = value / 255
$gameSystem._rs_hud.opacity = value.clamp(0, 255);
},
});
//----------------------------------------------------------------------------
// RS_EmptyHudLayer
//
//
function RS_EmptyHudLayer() {
this.initialize.apply(this, arguments);
}
RS_EmptyHudLayer.prototype = Object.create(Sprite.prototype);
RS_EmptyHudLayer.prototype.constructor = RS_EmptyHudLayer;
RS_EmptyHudLayer.prototype.initialize = function(bitmap) {
Sprite.prototype.initialize.call(this, bitmap);
this.alpha = 0;
};
RS_EmptyHudLayer.prototype.constructor = RS_EmptyHudLayer;
Object.defineProperty(RS_EmptyHudLayer.prototype, 'show', {
get: function() {
return $gameSystem._rs_hud.show;
},
set: function(value) {
$gameSystem._rs_hud.show = value;
}
});
Object.defineProperty(RS_EmptyHudLayer.prototype, 'opacity', {
get: function() {
return $gameSystem._rs_hud.opacity;
},
set: function(value) {
$gameSystem._rs_hud.opacity = value.clamp(0, 255);
}
});
//----------------------------------------------------------------------------
// HUD
//
//
HUD.prototype = Object.create(Stage.prototype);
HUD.prototype.constructor = HUD;
HUD.prototype.initialize = function(config) {
Stage.prototype.initialize.call(this);
this.createHud();
this.setAnchor(config.szAnchor || "LeftBottom");
this.setMemberIndex(parseInt(config.nIndex) || 0);
this.createFace();
this.createHp();
this.createMp();
this.createExp();
this.createText();
this.setPosition();
this.paramUpdate();
};
HUD.prototype.getAnchor = function(magnet) {
var anchor = RS.HUD.getDefaultHUDAnchor();
// Add Custom Anchor
for(var i = 0; i < RS.HUD.param.nMaxMembers; i++) {
var idx = parseInt(i + 1);
anchor['Custom Pos ' + idx] = RS.HUD.param.ptCustormAnchor[i];
}
return anchor[magnet];
};
HUD.prototype.setAnchor = function(anchor) {
var pos = this.getAnchor(anchor);
if(typeof(pos) === 'object') {
this._hud.x = pos.x;
this._hud.y = pos.y;
} else {
this.setAnchor(RS.HUD.param.szAnchor);
}
};
HUD.prototype.setMemberIndex = function(index) {
this._memberIndex = index;
};
HUD.prototype.fromImage = function(textureName) {
var texture = PIXI.utils.TextureCache[textureName + ".png"];
var sprite = new PIXI.Sprite(texture);
return sprite;
};
HUD.prototype.createHud = function() {
this._hud = this.fromImage(RS.HUD.param.imgEmptyHUD);
this.addChild(this._hud);
};
HUD.prototype.createFace = function() {
var player = this.getPlayer();
if(Imported["SumRndmDde Character Creator EX"]) {
if(player.hasSetImage()) {
this._faceBitmap = player.getCreatorBitmapFace();
} else {
this._faceBitmap = RS.HUD.loadFace(player.faceName());
}
} else {
this._faceBitmap = RS.HUD.loadFace(player.faceName());
}
this._maskBitmap = RS.HUD.loadPicture(RS.HUD.param.imgMasking);
this._maskBitmap.addLoadListener(function() {
this._faceBitmap.addLoadListener(this.circleClippingMask.bind(this, player.faceIndex()));
}.bind(this));
};
HUD.prototype.circleClippingMask = function(faceIndex) {
this._face = new Sprite();
var fw = Window_Base._faceWidth;
var fh = Window_Base._faceHeight;
var sx = (faceIndex % 4) * fw;
var sy = Math.floor(faceIndex / 4) * fh;
this._face.bitmap = new Bitmap(nFaceDiameter, nFaceDiameter);
if (RS.HUD.param.blurProcessing) {
this._face.bitmap.drawClippingImage(this._faceBitmap, this._maskBitmap, 0, 0, sx, sy);
} else {
this._face.bitmap.drawClippingImageNonBlur(this._faceBitmap, 0, 0, sx, sy);
}
this.addChild(this._face);
this.setCoord(this._face, RS.HUD.param.ptFace);
};
HUD.prototype.createMask = function (parent) {
var mask = new PIXI.Graphics();
mask.beginFill(0x0000ff, 1.0);
mask.drawRect(0,0, parent.width, parent.height);
mask.endFill();
parent.mask = mask;
return mask;
};
HUD.prototype.createHp = function() {
this._hp = this.fromImage(RS.HUD.param.imgHP);
this._hpMask = this.createMask(this._hp);
this.addChild(this._hpMask, this._hp);
};
HUD.prototype.createMp = function() {
this._mp = this.fromImage(RS.HUD.param.imgMP);
this._mpMask = this.createMask(this._mp);
this.addChild(this._mpMask, this._mp);
};
HUD.prototype.createExp = function() {
this._exp = this.fromImage(RS.HUD.param.imgEXP);
this._expMask = this.createMask(this._exp);
this.addChild(this._expMask, this._exp);
};
HUD.prototype.getTextParams = function(src) {
var param = RS.HUD.param;
var textProperties = {
'HP': [param.hpTextSize, param.szHpColor, param.szHpOutlineColor, param.szHpOutlineWidth],
'MP': [param.mpTextSize, param.szMpColor, param.szMpOutlineColor, param.szMpOutlineWidth],
'EXP': [param.expTextSize, param.szExpColor, param.szExpOutlineColor, param.szExpOutlineWidth],
'LEVEL': [param.levelTextSize, param.szLevelColor, param.szLevelOutlineColor, param.szLevelOutlineWidth],
'NAME': [param.nameTextSize, param.szNameColor, param.szNameOutlineColor, param.szNameOutlineWidth]
};
return textProperties[src];
};
HUD.prototype.createText = function() {
this._hpText = this.addText(this.getHp.bind(this), this.getTextParams('HP'));
this._mpText = this.addText(this.getMp.bind(this), this.getTextParams('MP'));
this._expText = this.addText(this.getExp.bind(this), this.getTextParams('EXP'));
this._levelText = this.addText(this.getLevel.bind(this), this.getTextParams('LEVEL'));
this._nameText = this.addText(this.getName.bind(this), this.getTextParams('NAME'));
};
HUD.prototype.setPosition = function() {
var param = RS.HUD.param;
if(this._face) this.setCoord(this._face, param.ptFace);
this.setCoord(this._hpMask, param.ptHP);
this.setCoord(this._hp, param.ptHP);
this.setCoord(this._mpMask, param.ptMP);
this.setCoord(this._mp, param.ptMP);
this.setCoord(this._expMask, param.ptEXP);
this.setCoord(this._exp, param.ptEXP);
this.setCoord(this._hpText, param.ptHPText);
this.setCoord(this._mpText, param.ptMPText);
this.setCoord(this._levelText, param.ptLevelText);
this.setCoord(this._expText, param.ptEXPText);
this.setCoord(this._nameText, param.ptNameText);
};
HUD.prototype.addText = function(strFunc, params) {
var bitmap = new Bitmap(120, params[0] + 8);
var text = new TextData(bitmap, strFunc, params);
var length = this.children.length;
this.addChildAt(text, length);
text.drawDisplayText();
return text;
};
HUD.prototype.getPlayer = function() {
return $gameParty.members()[this._memberIndex];
};
HUD.prototype.getHp = function() {
var player = this.getPlayer();
if(!player) return "0 / 0";
if(RS.HUD.param.showComma) {
return "%1 / %2".appendComma(player.hp, player.mhp);
} else {
return "%1 / %2".format(player.hp, player.mhp);
}
};
HUD.prototype.getMp = function() {
var player = this.getPlayer();
if(!player) return "0 / 0";
if(RS.HUD.param.showComma) {
return "%1 / %2".appendComma(player.mp, player.mmp);
} else {
return "%1 / %2".format(player.mp, player.mmp);
}
};
HUD.prototype.getExp = function() {
var player = this.getPlayer();
if(!player) return "0 / 0";
if(player.isMaxLevel()) return RS.HUD.param.maxExpText;
if(RS.HUD.param.showComma) {
return "%1 / %2".appendComma(player.relativeExp(), player.relativeMaxExp());
} else {
return "%1 / %2".format(player.relativeExp(), player.relativeMaxExp());
}
};
HUD.prototype.getLevel = function() {
var player = this.getPlayer();
if(!player) return "0";
if(RS.HUD.param.showComma) {
return "%1".appendComma(player.level);
} else {
return "%1".format(player.level);
}
};
HUD.prototype.getName = function() {
var player = this.getPlayer();
if(!player) return "";
var name = player && player.name();
if(name) {
return name;
} else {
return ' ';
}
};
HUD.prototype.getHpRate = function() {
var player = this.getPlayer();
if(!player) return 0;
return this._hp.width * (player.hp / player.mhp);
};
HUD.prototype.getMpRate = function() {
var player = this.getPlayer();
if(!player) return 0;
return this._mp.width * (player.mp / player.mmp);
};
HUD.prototype.getExpRate = function() {
var player = this.getPlayer();
if(!player) return 0;
return this._exp.width * (player.relativeExp() / player.relativeMaxExp());
};
HUD.prototype.getRealExpRate = function () {
var player = this.getPlayer();
if(!player) return 0;
if(this.inBattle() && $dataSystem.optDisplayTp) {
return ( player.tp / player.maxTp() );
} else {
return ( player.relativeExp() / player.relativeMaxExp() );
}
};
HUD.prototype.setCoord = function(s,obj) {
var oy = (s._callbackFunction instanceof Function) ? (s.bitmap.height / 2) : 0;
s.x = this._hud.x + obj.x;
s.y = this._hud.y + obj.y - oy;
s.visible = obj.visible;
};
HUD.prototype.update = function() {
this.paramUpdate();
};
HUD.prototype.updateText = function() {
this._hpText.requestUpdate();
this._mpText.requestUpdate();
this._expText.requestUpdate();
this._levelText.requestUpdate();
this._nameText.requestUpdate();
};
  HUD.prototype.updateGauge = function (mask, w, h) {
    if(!mask) return;
    mask.clear();
    mask.beginFill(0x0000ff, 1.0);
    mask.drawRect(0, 0, w, h);
    mask.endFill();
  };
  HUD.prototype.paramUpdate = function() {
    this.updateGauge(this._hpMask, this.getHpRate(), this._hp.height);
    this.updateGauge(this._mpMask, this.getMpRate(), this._mp.height);
    this.updateGauge(this._expMask, this.getExpRate(), this._exp.height);
  };
HUD.prototype.inBattle = function() {
return (SceneManager._scene instanceof Scene_Battle ||
$gameParty.inBattle() ||
DataManager.isBattleTest());
};
Object.defineProperty(HUD.prototype, 'show', {
get: function() {
return $gameSystem._rs_hud.show;
},
set: function(value) {
this.children.forEach( function(i) {
i.visible = value;
}, this);
$gameSystem._rs_hud.show = value;
if(value === true) {
this.setPosition();
}
},
});
Object.defineProperty(HUD.prototype, 'opacity', {
get: function() {
return $gameSystem._rs_hud.opacity;
},
set: function(value) {
this.children.forEach( function(i) {
i.opacity = value.clamp(0, 255);
}, this);
$gameSystem._rs_hud.opacity = value.clamp(0, 255);
},
});
//----------------------------------------------------------------------------
// Scene_Map
//
//
var alias_Scene_Map_createDisplayObjects = Scene_Map.prototype.createDisplayObjects;
Scene_Map.prototype.createDisplayObjects = function() {
alias_Scene_Map_createDisplayObjects.call(this);
if(RS.HUD.param.battleOnly || ($dataMap && $dataMap.meta.DISABLE_HUD) ) {
$gameHud = new RS_EmptyHudLayer();
} else {
this._hudLayer = new RS_HudLayer();
this._hudLayer.setFrame(0, 0, Graphics.boxWidth, Graphics.boxHeight);
$gameHud = this._hudLayer;
this._hudLayer.drawAllHud();
this.addChild(this._hudLayer);
this.swapChildren(this._windowLayer, this._hudLayer);
}
};
var alias_Scene_Map_start = Scene_Map.prototype.start;
Scene_Map.prototype.start = function () {
alias_Scene_Map_start.call(this);
$gameTemp.notifyHudTextRefresh();
};
var alias_Scene_Map_terminate = Scene_Map.prototype.terminate;
Scene_Map.prototype.terminate = function() {
this.removeChild(this._hudLayer);
$gameHud = null;
alias_Scene_Map_terminate.call(this);
};
//----------------------------------------------------------------------------
// Game_Party
//
//
var alias_Game_Party_addActor = Game_Party.prototype.addActor;
Game_Party.prototype.addActor = function(actorId) {
alias_Game_Party_addActor.call(this, actorId);
$gameTemp.notifyHudRefresh();
};
var alias_Game_Party_removeActor = Game_Party.prototype.removeActor;
Game_Party.prototype.removeActor = function(actorId) {
alias_Game_Party_removeActor.call(this, actorId);
$gameTemp.notifyHudRefresh();
};
//----------------------------------------------------------------------------
// Scene_Boot
//
//
var alias_Scene_Boot_loadSystemWindowImage = Scene_Boot.prototype.loadSystemWindowImage;
Scene_Boot.prototype.loadSystemWindowImage = function() {
alias_Scene_Boot_loadSystemWindowImage.call(this);
// Load Face
RS.HUD.param.preloadImportantFaces.forEach(function(i) {
if(Utils.RPGMAKER_VERSION >= '1.5.0') {
ImageManager.reserveFace(i)
} else {
ImageManager.loadFace(i);
}
}, this);
if(Utils.RPGMAKER_VERSION >= '1.5.0') {
ImageManager.reservePicture(RS.HUD.param.imgHP);
ImageManager.reservePicture(RS.HUD.param.imgMP);
ImageManager.reservePicture(RS.HUD.param.imgEXP);
}
};
var alias_Scene_Boot_start = Scene_Boot.prototype.start;
Scene_Boot.prototype.start = function() {
alias_Scene_Boot_start.call(this);
// Load Custom Anchor
for(var i = 0; i < RS.HUD.param.nMaxMembers; i++) {
RS.HUD.param.ptCustormAnchor.push( RS.HUD.loadCustomPosition(parameters[String('Custom Pos ' + (i + 1))] || '0, 0') );
}
// Load Custom Font
if(RS.HUD.param.bUseCustomFont) {
Graphics.loadFont(RS.HUD.param.szCustomFontName, RS.HUD.param.szCustomFontSrc);
}
};
//----------------------------------------------------------------------------
// Scene_Map
//
//
var alias_Scene_Map_snapForBattleBackground = Scene_Map.prototype.snapForBattleBackground;
Scene_Map.prototype.snapForBattleBackground = function() {
var temp = $gameHud.show;
if($gameHud && $gameHud.show) $gameHud.show = false;
alias_Scene_Map_snapForBattleBackground.call(this);
if($gameHud && !$gameHud.show) {
RS.HUD.param.isPreviousShowUp = temp;
$gameHud.show = temp;
}
};
var alias_Scene_Map_updateFade = Scene_Map.prototype.updateFade;
Scene_Map.prototype.updateFade = function() {
alias_Scene_Map_updateFade.call(this);
if(this._fadeDuration == 0 && RS.HUD.param.isCurrentBattleShowUp) {
if($gameHud) $gameHud.show = RS.HUD.param.isPreviousShowUp;
RS.HUD.param.isCurrentBattleShowUp = false;
}
};
var alias_Scene_Battle_updateFade = Scene_Battle.prototype.updateFade;
Scene_Battle.prototype.updateFade = function() {
alias_Scene_Battle_updateFade.call(this);
if(this._fadeDuration == 0 && !RS.HUD.param.isCurrentBattleShowUp) {
if($gameHud) $gameHud.show = true;
RS.HUD.param.isCurrentBattleShowUp = true;
}
};
//----------------------------------------------------------------------------
// Game_Interpreter
//
//
var alias_Game_Interpreter_pluginCommand = Game_Interpreter.prototype.pluginCommand;
Game_Interpreter.prototype.pluginCommand = function(command, args) {
alias_Game_Interpreter_pluginCommand.call(this, command, args);
if(command === "RS_HUD") {
switch (args[0].toLowerCase()) {
case 'opacity':
$gameHud.opacity = Number(args[1]);
break;
case 'visible':
$gameHud.show = Boolean(args[1] === "true");
break;
case 'import':
RS.HUD.importDataWithAjax(args[1] + '.json');
break;
case 'export':
RS.HUD.exportData(args[1] + '.json');
break;
}
}
};
//----------------------------------------------------------------------------
// String Utils
//
//
/**
* String.prototype.toArray
*/
String.prototype.toArray = function(){
return this.split("");
}
/**
* String.prototype.reverse
*/
String.prototype.reverse = function(){
return this.toArray().reverse().join("");
}
/**
* String.prototype.toComma
*/
String.prototype.toComma = function(){
return this.reverse().match(/.{1,3}/g).join(",").reverse();
}
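  // Usage sketch: "1234567".toComma() returns "1,234,567".
  // (Intended for strings of digits only.)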
  /**
   * Replaces %1, %2 and so on in the string with the arguments,
   * inserting commas every three digits into each substituted number.
   *
   * @method String.prototype.appendComma
   * @param {Any} ...args The numbers to format
   * @return {String} A formatted string with commas
   */
String.prototype.appendComma = function() {
var args = arguments;
return this.replace(/%([0-9]+)/g, function(s, n) {
return (args[Number(n) - 1] + '').toComma();
});
};
//============================================================================
// Scene_Boot
//============================================================================
var alias_Scene_Boot_isReady = Scene_Boot.prototype.isReady;
Scene_Boot.prototype.isReady = function() {
if(alias_Scene_Boot_isReady.call(this)) {
return RS.HUD.param.init;
} else {
return false;
}
};
//----------------------------------------------------------------------------
// Output Objects
//
//
window.HUD = HUD;
window.RS_HudLayer = RS_HudLayer;
})();<|fim▁end|> | |
<|file_name|>gobject.rs<|end_file_name|><|fim▁begin|>// This file is part of Grust, GObject introspection bindings for Rust
//
// Copyright (C) 2013, 2014 Mikhail Zabaluev <[email protected]>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#![crate_name = "grust_gobject_2_0"]
#![crate_type = "lib"]
extern crate grust;<|fim▁hole|>use grust::gtype::GType;
use grust::object;
use grust::wrap;
#[repr(C)]
pub struct TypeInstance {
raw: ffi::GTypeInstance
}
unsafe impl wrap::Wrapper for TypeInstance {
type Raw = ffi::GTypeInstance;
}
#[repr(C)]
pub struct Object {
raw: ffi::GObject
}
unsafe impl wrap::Wrapper for Object {
type Raw = ffi::GObject;
}
pub mod cast {
use grust::object;
pub trait AsObject {
fn as_object(&self) -> &super::Object;
}
impl<T> AsObject for T where T: object::Upcast<super::Object> {
#[inline]
fn as_object(&self) -> &super::Object { self.upcast() }
}
}
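// Illustrative sketch, not part of the original bindings: the blanket impl
// inside `cast` gives as_object() to every type implementing
// object::Upcast<Object>. Assuming a hypothetical wrapper type `Foo` with
// such an impl:
//
//     use cast::AsObject;
//     fn base_of(foo: &Foo) -> &Object { foo.as_object() }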
unsafe impl object::ObjectType for Object {
fn get_type() -> GType {
unsafe {
GType::from_raw(ffi::g_object_get_type())
}
}
}<|fim▁end|> | extern crate grust_glib_2_0 as glib;
extern crate gobject_2_0_sys as ffi;
|
<|file_name|>hid.cpp<|end_file_name|><|fim▁begin|>// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cmath>
#include "common/emu_window.h"
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/hid/hid.h"
#include "core/hle/service/hid/hid_spvr.h"
#include "core/hle/service/hid/hid_user.h"
#include "core/hle/service/service.h"
#include "video_core/video_core.h"
namespace Service {
namespace HID {
// Handle to shared memory region designated to HID_User service
static Kernel::SharedPtr<Kernel::SharedMemory> shared_mem;
// Event handles
static Kernel::SharedPtr<Kernel::Event> event_pad_or_touch_1;
static Kernel::SharedPtr<Kernel::Event> event_pad_or_touch_2;
static Kernel::SharedPtr<Kernel::Event> event_accelerometer;
static Kernel::SharedPtr<Kernel::Event> event_gyroscope;
static Kernel::SharedPtr<Kernel::Event> event_debug_pad;
static u32 next_pad_index;
static u32 next_touch_index;
static u32 next_accelerometer_index;
static u32 next_gyroscope_index;
static int enable_accelerometer_count = 0; // positive means enabled
static int enable_gyroscope_count = 0; // positive means enabled
static PadState GetCirclePadDirectionState(s16 circle_pad_x, s16 circle_pad_y) {
    // 30 degrees and 60 degrees are the angular thresholds between directions
constexpr float TAN30 = 0.577350269f;
constexpr float TAN60 = 1 / TAN30;
// a circle pad radius greater than 40 will trigger circle pad direction
constexpr int CIRCLE_PAD_THRESHOLD_SQUARE = 40 * 40;
PadState state;
state.hex = 0;
if (circle_pad_x * circle_pad_x + circle_pad_y * circle_pad_y > CIRCLE_PAD_THRESHOLD_SQUARE) {
float t = std::abs(static_cast<float>(circle_pad_y) / circle_pad_x);
if (circle_pad_x != 0 && t < TAN60) {
if (circle_pad_x > 0)
state.circle_right.Assign(1);
else
state.circle_left.Assign(1);
}
if (circle_pad_x == 0 || t > TAN30) {
if (circle_pad_y > 0)
state.circle_up.Assign(1);
else
state.circle_down.Assign(1);
}
}
return state;
}
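// Worked examples for GetCirclePadDirectionState (illustrative inputs):
//   (x, y) = (60, 30): 60*60 + 30*30 = 4500 > 1600 and t = 0.5 < TAN30,
//                      so only circle_right is set (within 30 deg of the x axis).
//   (x, y) = (40, 40): 3200 > 1600 and t = 1.0 lies between TAN30 and TAN60,
//                      so circle_right and circle_up are both set (diagonal).
//   (x, y) = (30, 20): 900 + 400 = 1300 <= 1600, inside the dead zone,
//                      so no direction bits are set.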
void Update() {
SharedMem* mem = reinterpret_cast<SharedMem*>(shared_mem->GetPointer());
if (mem == nullptr) {
LOG_DEBUG(Service_HID, "Cannot update HID prior to mapping shared memory!");
return;
}
PadState state = VideoCore::g_emu_window->GetPadState();
// Get current circle pad position and update circle pad direction
s16 circle_pad_x, circle_pad_y;
std::tie(circle_pad_x, circle_pad_y) = VideoCore::g_emu_window->GetCirclePadState();
state.hex |= GetCirclePadDirectionState(circle_pad_x, circle_pad_y).hex;
mem->pad.current_state.hex = state.hex;
mem->pad.index = next_pad_index;
next_pad_index = (next_pad_index + 1) % mem->pad.entries.size();
// Get the previous Pad state
u32 last_entry_index = (mem->pad.index - 1) % mem->pad.entries.size();
PadState old_state = mem->pad.entries[last_entry_index].current_state;
// Compute bitmask with 1s for bits different from the old state
PadState changed = {{(state.hex ^ old_state.hex)}};
// Get the current Pad entry
PadDataEntry& pad_entry = mem->pad.entries[mem->pad.index];
// Update entry properties
pad_entry.current_state.hex = state.hex;
pad_entry.delta_additions.hex = changed.hex & state.hex;
pad_entry.delta_removals.hex = changed.hex & old_state.hex;
pad_entry.circle_pad_x = circle_pad_x;
pad_entry.circle_pad_y = circle_pad_y;
// If we just updated index 0, provide a new timestamp
if (mem->pad.index == 0) {
mem->pad.index_reset_ticks_previous = mem->pad.index_reset_ticks;
mem->pad.index_reset_ticks = (s64)CoreTiming::GetTicks();
}
mem->touch.index = next_touch_index;
next_touch_index = (next_touch_index + 1) % mem->touch.entries.size();
// Get the current touch entry
TouchDataEntry& touch_entry = mem->touch.entries[mem->touch.index];
bool pressed = false;
std::tie(touch_entry.x, touch_entry.y, pressed) = VideoCore::g_emu_window->GetTouchState();
touch_entry.valid.Assign(pressed ? 1 : 0);
// TODO(bunnei): We're not doing anything with offset 0xA8 + 0x18 of HID SharedMemory, which
// supposedly is "Touch-screen entry, which contains the raw coordinate data prior to being
// converted to pixel coordinates." (http://3dbrew.org/wiki/HID_Shared_Memory#Offset_0xA8).
// If we just updated index 0, provide a new timestamp
if (mem->touch.index == 0) {
mem->touch.index_reset_ticks_previous = mem->touch.index_reset_ticks;<|fim▁hole|> event_pad_or_touch_1->Signal();
event_pad_or_touch_2->Signal();
// Update accelerometer
if (enable_accelerometer_count > 0) {
mem->accelerometer.index = next_accelerometer_index;
next_accelerometer_index =
(next_accelerometer_index + 1) % mem->accelerometer.entries.size();
AccelerometerDataEntry& accelerometer_entry =
mem->accelerometer.entries[mem->accelerometer.index];
std::tie(accelerometer_entry.x, accelerometer_entry.y, accelerometer_entry.z) =
VideoCore::g_emu_window->GetAccelerometerState();
// Make up "raw" entry
// TODO(wwylele):
        // From hardware testing, the raw_entry values are approximately, but
        // not exactly, twice the corresponding entries (sometimes with a minus
        // sign). This may be caused by system calibration of the accelerometer.
        // Figure out how it works, or, if no game reads raw_entry, remove the
        // following three lines and leave raw_entry unimplemented.
mem->accelerometer.raw_entry.x = -2 * accelerometer_entry.x;
mem->accelerometer.raw_entry.z = 2 * accelerometer_entry.y;
mem->accelerometer.raw_entry.y = -2 * accelerometer_entry.z;
// If we just updated index 0, provide a new timestamp
if (mem->accelerometer.index == 0) {
mem->accelerometer.index_reset_ticks_previous = mem->accelerometer.index_reset_ticks;
mem->accelerometer.index_reset_ticks = (s64)CoreTiming::GetTicks();
}
event_accelerometer->Signal();
}
// Update gyroscope
if (enable_gyroscope_count > 0) {
mem->gyroscope.index = next_gyroscope_index;
next_gyroscope_index = (next_gyroscope_index + 1) % mem->gyroscope.entries.size();
GyroscopeDataEntry& gyroscope_entry = mem->gyroscope.entries[mem->gyroscope.index];
std::tie(gyroscope_entry.x, gyroscope_entry.y, gyroscope_entry.z) =
VideoCore::g_emu_window->GetGyroscopeState();
// Make up "raw" entry
mem->gyroscope.raw_entry.x = gyroscope_entry.x;
mem->gyroscope.raw_entry.z = -gyroscope_entry.y;
mem->gyroscope.raw_entry.y = gyroscope_entry.z;
// If we just updated index 0, provide a new timestamp
if (mem->gyroscope.index == 0) {
mem->gyroscope.index_reset_ticks_previous = mem->gyroscope.index_reset_ticks;
mem->gyroscope.index_reset_ticks = (s64)CoreTiming::GetTicks();
}
event_gyroscope->Signal();
}
}
void GetIPCHandles(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
cmd_buff[1] = 0; // No error
cmd_buff[2] = 0x14000000; // IPC Command Structure translate-header
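    // Note: (0x14000000 >> 26) == 5, i.e. (6 - 1) << 26, matching the six
    // kernel handles copied below (shared memory plus five events), assuming
    // the 3DS IPC handle-descriptor encoding of (handle_count - 1) << 26.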
    // TODO(yuriks): Return error from SendSyncRequest if this fails (part of IPC marshalling)
cmd_buff[3] = Kernel::g_handle_table.Create(Service::HID::shared_mem).MoveFrom();
cmd_buff[4] = Kernel::g_handle_table.Create(Service::HID::event_pad_or_touch_1).MoveFrom();
cmd_buff[5] = Kernel::g_handle_table.Create(Service::HID::event_pad_or_touch_2).MoveFrom();
cmd_buff[6] = Kernel::g_handle_table.Create(Service::HID::event_accelerometer).MoveFrom();
cmd_buff[7] = Kernel::g_handle_table.Create(Service::HID::event_gyroscope).MoveFrom();
cmd_buff[8] = Kernel::g_handle_table.Create(Service::HID::event_debug_pad).MoveFrom();
}
void EnableAccelerometer(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
++enable_accelerometer_count;
event_accelerometer->Signal();
cmd_buff[1] = RESULT_SUCCESS.raw;
LOG_DEBUG(Service_HID, "called");
}
void DisableAccelerometer(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
--enable_accelerometer_count;
event_accelerometer->Signal();
cmd_buff[1] = RESULT_SUCCESS.raw;
LOG_DEBUG(Service_HID, "called");
}
void EnableGyroscopeLow(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
++enable_gyroscope_count;
event_gyroscope->Signal();
cmd_buff[1] = RESULT_SUCCESS.raw;
LOG_DEBUG(Service_HID, "called");
}
void DisableGyroscopeLow(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
--enable_gyroscope_count;
event_gyroscope->Signal();
cmd_buff[1] = RESULT_SUCCESS.raw;
LOG_DEBUG(Service_HID, "called");
}
void GetGyroscopeLowRawToDpsCoefficient(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
cmd_buff[1] = RESULT_SUCCESS.raw;
f32 coef = VideoCore::g_emu_window->GetGyroscopeRawToDpsCoefficient();
memcpy(&cmd_buff[2], &coef, 4);
}
void GetGyroscopeLowCalibrateParam(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
cmd_buff[1] = RESULT_SUCCESS.raw;
const s16 param_unit = 6700; // an approximate value taken from hw
GyroscopeCalibrateParam param = {
{0, param_unit, -param_unit}, {0, param_unit, -param_unit}, {0, param_unit, -param_unit},
};
    memcpy(&cmd_buff[2], &param, sizeof(param));
LOG_WARNING(Service_HID, "(STUBBED) called");
}
void GetSoundVolume(Service::Interface* self) {
u32* cmd_buff = Kernel::GetCommandBuffer();
const u8 volume = 0x3F; // TODO(purpasmart): Find out if this is the max value for the volume
cmd_buff[1] = RESULT_SUCCESS.raw;
cmd_buff[2] = volume;
LOG_WARNING(Service_HID, "(STUBBED) called");
}
void Init() {
using namespace Kernel;
AddService(new HID_U_Interface);
AddService(new HID_SPVR_Interface);
using Kernel::MemoryPermission;
shared_mem =
SharedMemory::Create(nullptr, 0x1000, MemoryPermission::ReadWrite, MemoryPermission::Read,
0, Kernel::MemoryRegion::BASE, "HID:SharedMemory");
next_pad_index = 0;
next_touch_index = 0;
// Create event handles
event_pad_or_touch_1 = Event::Create(ResetType::OneShot, "HID:EventPadOrTouch1");
event_pad_or_touch_2 = Event::Create(ResetType::OneShot, "HID:EventPadOrTouch2");
event_accelerometer = Event::Create(ResetType::OneShot, "HID:EventAccelerometer");
event_gyroscope = Event::Create(ResetType::OneShot, "HID:EventGyroscope");
event_debug_pad = Event::Create(ResetType::OneShot, "HID:EventDebugPad");
}
void Shutdown() {
shared_mem = nullptr;
event_pad_or_touch_1 = nullptr;
event_pad_or_touch_2 = nullptr;
event_accelerometer = nullptr;
event_gyroscope = nullptr;
event_debug_pad = nullptr;
}
} // namespace HID
} // namespace Service<|fim▁end|> | mem->touch.index_reset_ticks = (s64)CoreTiming::GetTicks();
}
// Signal both handles when there's an update to Pad or touch |
<|file_name|>SSSDConfigTest.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
'''
Created on Sep 18, 2009
@author: sgallagh
'''
import unittest
import os
from stat import *
import sys
srcdir = os.getenv('srcdir')
if srcdir:
sys.path.insert(0, "./src/config")
srcdir = srcdir + "/src/config"
else:
srcdir = "."
import SSSDConfig
class SSSDConfigTestValid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Validate services
services = sssdconfig.list_services()
self.assertTrue('sssd' in services)
self.assertTrue('nss' in services)
self.assertTrue('pam' in services)
#Verify service attributes
sssd_service = sssdconfig.get_service('sssd')
service_opts = sssd_service.list_options()
self.assertTrue('services' in service_opts.keys())
service_list = sssd_service.get_option('services')
self.assertTrue('nss' in service_list)
self.assertTrue('pam' in service_list)
self.assertTrue('domains' in service_opts)
self.assertTrue('reconnection_retries' in service_opts)
del sssdconfig
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
sssdconfig.delete_service('sssd')
        new_sssd_service = sssdconfig.new_service('sssd')
        new_options = new_sssd_service.list_options()
self.assertTrue('debug_level' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('command' in new_options)
self.assertEquals(new_options['command'][0], str)
self.assertTrue('reconnection_retries' in new_options)
self.assertEquals(new_options['reconnection_retries'][0], int)
self.assertTrue('services' in new_options)
        self.assertEquals(new_options['services'][0], list)
        self.assertEquals(new_options['services'][1], str)
self.assertTrue('domains' in new_options)
self.assertEquals(new_options['domains'][0], list)
self.assertEquals(new_options['domains'][1], str)
self.assertTrue('sbus_timeout' in new_options)
self.assertEquals(new_options['sbus_timeout'][0], int)
self.assertTrue('re_expression' in new_options)
self.assertEquals(new_options['re_expression'][0], str)
self.assertTrue('full_name_format' in new_options)
self.assertEquals(new_options['full_name_format'][0], str)
self.assertTrue('default_domain_suffix' in new_options)
self.assertEquals(new_options['default_domain_suffix'][0], str)
del sssdconfig
def testDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
#Validate domain list
domains = sssdconfig.list_domains()
self.assertTrue('LOCAL' in domains)
self.assertTrue('LDAP' in domains)
self.assertTrue('PROXY' in domains)
self.assertTrue('IPA' in domains)
#Verify domain attributes
ipa_domain = sssdconfig.get_domain('IPA')
domain_opts = ipa_domain.list_options()
self.assertTrue('debug_level' in domain_opts.keys())
self.assertTrue('id_provider' in domain_opts.keys())
self.assertTrue('auth_provider' in domain_opts.keys())
del sssdconfig
def testListProviders(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
junk_domain = sssdconfig.new_domain('junk')
providers = junk_domain.list_providers()
self.assertTrue('ldap' in providers.keys())
def testCreateNewLocalConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
local_domain = sssdconfig.new_domain('LOCAL')
local_domain.add_provider('local', 'id')
local_domain.set_option('debug_level', 1)
local_domain.set_option('default_shell', '/bin/tcsh')
local_domain.set_active(True)
sssdconfig.save_domain(local_domain)
of = '/tmp/testCreateNewLocalConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
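        #(0177 == S_IXUSR | S_IRWXG | S_IRWXO: any group/other access bit
        #or any execute bit will trip this assertion)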
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testCreateNewLDAPConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
ldap_domain = sssdconfig.new_domain('LDAP')
ldap_domain.add_provider('ldap', 'id')
ldap_domain.set_option('debug_level', 1)
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testCreateNewLDAPConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testModifyExistingConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
ldap_domain.set_option('debug_level', 3)
ldap_domain.remove_provider('auth')
ldap_domain.add_provider('krb5', 'auth')
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testModifyExistingConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testSpaces(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
self.assertEqual(ldap_domain.get_option('auth_provider'), 'ldap')
self.assertEqual(ldap_domain.get_option('id_provider'), 'ldap')
class SSSDConfigTestInvalid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBadBool(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-invalid-badbool.conf")
self.assertRaises(TypeError,
sssdconfig.get_domain,'IPA')
class SSSDConfigTestSSSDService(unittest.TestCase):
def setUp(self):
self.schema = SSSDConfig.SSSDConfigSchema(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
def tearDown(self):
pass
def testInit(self):
# Positive test
service = SSSDConfig.SSSDService('sssd', self.schema)
# Type Error test
# Name is not a string
self.assertRaises(TypeError, SSSDConfig.SSSDService, 3, self.schema)
# TypeError test
# schema is not an SSSDSchema
self.assertRaises(TypeError, SSSDConfig.SSSDService, '3', self)
# ServiceNotRecognizedError test
self.assertRaises(SSSDConfig.ServiceNotRecognizedError,
SSSDConfig.SSSDService, 'ssd', self.schema)
def testListOptions(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
options = service.list_options()
control_list = [
'services',
'domains',
'timeout',
'force_timeout',
'sbus_timeout',
're_expression',
'full_name_format',
'krb5_rcache_dir',
'default_domain_suffix',
'debug_level',
'debug_timestamps',
'debug_microseconds',
'debug_to_files',
'command',
'reconnection_retries',
'fd_limit',
'client_idle_timeout',
'description']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
self.assertTrue(type(options['reconnection_retries']) == tuple,
"Option values should be a tuple")
self.assertTrue(options['reconnection_retries'][0] == int,
"reconnection_retries should require an int. " +
"list_options is requiring a %s" %
options['reconnection_retries'][0])
self.assertTrue(options['reconnection_retries'][1] == None,
"reconnection_retries should not require a subtype. " +
"list_options is requiring a %s" %
options['reconnection_retries'][1])
self.assertTrue(options['reconnection_retries'][3] == None,
"reconnection_retries should have no default")
self.assertTrue(type(options['services']) == tuple,
"Option values should be a tuple")
self.assertTrue(options['services'][0] == list,
"services should require an list. " +
"list_options is requiring a %s" %
options['services'][0])
self.assertTrue(options['services'][1] == str,
"services should require a subtype of str. " +
"list_options is requiring a %s" %
options['services'][1])
def testListMandatoryOptions(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
options = service.list_mandatory_options()
control_list = [
'services',<|fim▁hole|> self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
self.assertTrue(type(options['services']) == tuple,
"Option values should be a tuple")
self.assertTrue(options['services'][0] == list,
"services should require an list. " +
"list_options is requiring a %s" %
options['services'][0])
self.assertTrue(options['services'][1] == str,
"services should require a subtype of str. " +
"list_options is requiring a %s" %
options['services'][1])
def testSetOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Exactly right
service.set_option('debug_level', 2)
self.assertEqual(service.get_option('debug_level'), 2)
# Positive test - Allow converting "safe" values
service.set_option('debug_level', '2')
self.assertEqual(service.get_option('debug_level'), 2)
# Positive test - Remove option if value is None
service.set_option('debug_level', None)
self.assertTrue('debug_level' not in service.options.keys())
# Negative test - Nonexistent Option
self.assertRaises(SSSDConfig.NoOptionError, service.set_option, 'nosuchoption', 1)
# Negative test - Incorrect type
self.assertRaises(TypeError, service.set_option, 'debug_level', 'two')
def testGetOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Single-valued
self.assertEqual(service.get_option('config_file_version'), 2)
# Positive test - List of values
self.assertEqual(service.get_option('services'), ['nss', 'pam'])
# Negative Test - Bad Option
self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'nosuchoption')
def testGetAllOptions(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
#Positive test
options = service.get_all_options()
control_list = [
'config_file_version',
'services']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
def testRemoveOption(self):
service = SSSDConfig.SSSDService('sssd', self.schema)
# Positive test - Remove an option that exists
self.assertEqual(service.get_option('services'), ['nss', 'pam'])
service.remove_option('services')
        self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'services')
# Positive test - Remove an option that doesn't exist
self.assertRaises(SSSDConfig.NoOptionError, service.get_option, 'nosuchentry')
service.remove_option('nosuchentry')
class SSSDConfigTestSSSDDomain(unittest.TestCase):
def setUp(self):
self.schema = SSSDConfig.SSSDConfigSchema(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
def tearDown(self):
pass
def testInit(self):
# Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
# Negative Test - Name not a string
self.assertRaises(TypeError, SSSDConfig.SSSDDomain, 2, self.schema)
# Negative Test - Schema is not an SSSDSchema
self.assertRaises(TypeError, SSSDConfig.SSSDDomain, 'mydomain', self)
def testGetName(self):
# Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
self.assertEqual(domain.get_name(), 'mydomain')
def testSetActive(self):
#Positive Test
domain = SSSDConfig.SSSDDomain('mydomain', self.schema)
# Should default to inactive
self.assertFalse(domain.active)
domain.set_active(True)
self.assertTrue(domain.active)
domain.set_active(False)
self.assertFalse(domain.active)
def testListOptions(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# First test default options
options = domain.list_options()
control_list = [
'description',
'debug_level',
'debug_timestamps',
'min_id',
'max_id',
'timeout',
'force_timeout',
'offline_timeout',
'try_inotify',
'command',
'enumerate',
'cache_credentials',
'store_legacy_passwords',
'use_fully_qualified_names',
'ignore_group_members',
'filter_users',
'filter_groups',
'entry_cache_timeout',
'entry_cache_user_timeout',
'entry_cache_group_timeout',
'entry_cache_netgroup_timeout',
'entry_cache_service_timeout',
'entry_cache_autofs_timeout',
'entry_cache_sudo_timeout',
'refresh_expired_interval',
'lookup_family_order',
'account_cache_expiration',
'dns_resolver_timeout',
'dns_discovery_domain',
'dyndns_update',
'dyndns_ttl',
'dyndns_iface',
'dyndns_refresh_interval',
'dyndns_update_ptr',
'dyndns_force_tcp',
'dyndns_auth',
'subdomain_enumerate',
'override_gid',
'case_sensitive',
'override_homedir',
'fallback_homedir',
'override_shell',
'default_shell',
'pwd_expiration_warning',
'id_provider',
'auth_provider',
'access_provider',
'chpass_provider',
'sudo_provider',
'autofs_provider',
'session_provider',
'hostid_provider',
'subdomains_provider',
'realmd_tags',
'subdomain_refresh_interval']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
self.assertTrue(type(options['max_id']) == tuple,
"Option values should be a tuple")
self.assertTrue(options['max_id'][0] == int,
"max_id should require an int. " +
"list_options is requiring a %s" %
options['max_id'][0])
self.assertTrue(options['max_id'][1] == None,
"max_id should not require a subtype. " +
"list_options is requiring a %s" %
options['max_id'][1])
# Add a provider and verify that the new options appear
domain.add_provider('local', 'id')
control_list.extend(
['default_shell',
'base_directory',
'create_homedir',
'remove_homedir',
'homedir_umask',
'skel_dir',
'mail_dir',
'userdel_cmd'])
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Add a provider that has global options and verify that
# The new options appear.
domain.add_provider('krb5', 'auth')
backup_list = control_list[:]
control_list.extend(
['krb5_server',
'krb5_backup_server',
'krb5_realm',
'krb5_kpasswd',
'krb5_backup_kpasswd',
'krb5_ccachedir',
'krb5_ccname_template',
'krb5_keytab',
'krb5_validate',
'krb5_store_password_if_offline',
'krb5_auth_timeout',
'krb5_renewable_lifetime',
'krb5_lifetime',
'krb5_renew_interval',
'krb5_use_fast',
'krb5_fast_principal',
'krb5_canonicalize',
'krb5_use_enterprise_principal',
'krb5_use_kdcinfo'])
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
control_list.extend(['krb5_kdcip'])
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Remove the auth domain and verify that the options
# revert to the backup_list
domain.remove_provider('auth')
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in backup_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in backup_list,
'Option [%s] unexpectedly found' %
option)
def testListMandatoryOptions(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# First test default options
options = domain.list_mandatory_options()
control_list = ['id_provider']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Add a provider that has global options and verify that
# The new options appear.
domain.add_provider('krb5', 'auth')
backup_list = control_list[:]
control_list.extend(['krb5_realm'])
options = domain.list_mandatory_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Remove the auth domain and verify that the options
# revert to the backup_list
domain.remove_provider('auth')
options = domain.list_mandatory_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in backup_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in backup_list,
'Option [%s] unexpectedly found' %
option)
def testListProviders(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
control_provider_dict = {
'ipa': ['id', 'auth', 'access', 'chpass', 'sudo', 'autofs',
'session', 'hostid', 'subdomains'],
'ad': ['id', 'auth', 'access', 'chpass', 'sudo', 'subdomains'],
'local': ['id', 'auth', 'chpass'],
'ldap': ['id', 'auth', 'access', 'chpass', 'sudo', 'autofs'],
'krb5': ['auth', 'access', 'chpass'],
'proxy': ['id', 'auth', 'chpass'],
'simple': ['access'],
'permit': ['access'],
'deny': ['access']}
providers = domain.list_providers()
# Ensure that all of the expected defaults are there
for provider in control_provider_dict.keys():
for ptype in control_provider_dict[provider]:
self.assertTrue(providers.has_key(provider))
self.assertTrue(ptype in providers[provider])
for provider in providers.keys():
for ptype in providers[provider]:
self.assertTrue(control_provider_dict.has_key(provider))
self.assertTrue(ptype in control_provider_dict[provider])
def testListProviderOptions(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Test looking up a specific provider type
options = domain.list_provider_options('krb5', 'auth')
control_list = [
'krb5_server',
'krb5_backup_server',
'krb5_kdcip',
'krb5_realm',
'krb5_kpasswd',
'krb5_backup_kpasswd',
'krb5_ccachedir',
'krb5_ccname_template',
'krb5_keytab',
'krb5_validate',
'krb5_store_password_if_offline',
'krb5_auth_timeout',
'krb5_renewable_lifetime',
'krb5_lifetime',
'krb5_renew_interval',
'krb5_use_fast',
'krb5_fast_principal',
'krb5_canonicalize',
'krb5_use_enterprise_principal',
'krb5_use_kdcinfo']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
#Test looking up all provider values
options = domain.list_provider_options('krb5')
control_list.extend(['krb5_kpasswd'])
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
def testAddProvider(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive Test
domain.add_provider('local', 'id')
# Negative Test - No such backend type
self.assertRaises(SSSDConfig.NoSuchProviderError,
domain.add_provider, 'nosuchbackend', 'auth')
# Negative Test - No such backend subtype
self.assertRaises(SSSDConfig.NoSuchProviderSubtypeError,
domain.add_provider, 'ldap', 'nosuchsubtype')
# Negative Test - Try to add a second provider of the same type
self.assertRaises(SSSDConfig.ProviderSubtypeInUse,
domain.add_provider, 'ldap', 'id')
def testRemoveProvider(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# First test default options
options = domain.list_options()
control_list = [
'description',
'debug_level',
'debug_timestamps',
'min_id',
'max_id',
'timeout',
'force_timeout',
'offline_timeout',
'try_inotify',
'command',
'enumerate',
'cache_credentials',
'store_legacy_passwords',
'use_fully_qualified_names',
'ignore_group_members',
'filter_users',
'filter_groups',
'entry_cache_timeout',
'entry_cache_user_timeout',
'entry_cache_group_timeout',
'entry_cache_netgroup_timeout',
'entry_cache_service_timeout',
'entry_cache_autofs_timeout',
'entry_cache_sudo_timeout',
'refresh_expired_interval',
'account_cache_expiration',
'lookup_family_order',
'dns_resolver_timeout',
'dns_discovery_domain',
'dyndns_update',
'dyndns_ttl',
'dyndns_iface',
'dyndns_refresh_interval',
'dyndns_update_ptr',
'dyndns_force_tcp',
'dyndns_auth',
'subdomain_enumerate',
'override_gid',
'case_sensitive',
'override_homedir',
'fallback_homedir',
'override_shell',
'default_shell',
'pwd_expiration_warning',
'id_provider',
'auth_provider',
'access_provider',
'chpass_provider',
'sudo_provider',
'autofs_provider',
'session_provider',
'hostid_provider',
'subdomains_provider',
'realmd_tags',
'subdomain_refresh_interval']
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
self.assertTrue(type(options['max_id']) == tuple,
"Option values should be a tuple")
self.assertTrue(options['max_id'][0] == int,
"config_file_version should require an int. " +
"list_options is requiring a %s" %
options['max_id'][0])
self.assertTrue(options['max_id'][1] == None,
"config_file_version should not require a subtype. " +
"list_options is requiring a %s" %
options['max_id'][1])
# Add a provider and verify that the new options appear
domain.add_provider('local', 'id')
control_list.extend(
['default_shell',
'base_directory',
'create_homedir',
'remove_homedir',
'homedir_umask',
'skel_dir',
'mail_dir',
'userdel_cmd'])
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Add a provider that has global options and verify that
# The new options appear.
domain.add_provider('krb5', 'auth')
backup_list = control_list[:]
control_list.extend(
['krb5_server',
'krb5_backup_server',
'krb5_kdcip',
'krb5_realm',
'krb5_kpasswd',
'krb5_backup_kpasswd',
'krb5_ccachedir',
'krb5_ccname_template',
'krb5_keytab',
'krb5_validate',
'krb5_store_password_if_offline',
'krb5_auth_timeout',
'krb5_renewable_lifetime',
'krb5_lifetime',
'krb5_renew_interval',
'krb5_use_fast',
'krb5_fast_principal',
'krb5_canonicalize',
'krb5_use_enterprise_principal',
'krb5_use_kdcinfo'])
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in control_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in control_list,
'Option [%s] unexpectedly found' %
option)
# Remove the local ID provider and add an LDAP one
# LDAP ID providers can also use the krb5_realm
domain.remove_provider('id')
self.assertFalse(domain.options.has_key('id_provider'))
domain.add_provider('ldap', 'id')
# Set the krb5_realm option and the ldap_uri option
domain.set_option('krb5_realm', 'EXAMPLE.COM')
domain.set_option('ldap_uri', 'ldap://ldap.example.com')
self.assertEquals(domain.get_option('krb5_realm'),
'EXAMPLE.COM')
self.assertEquals(domain.get_option('ldap_uri'),
'ldap://ldap.example.com')
# Remove the LDAP provider and verify that krb5_realm remains
domain.remove_provider('id')
self.assertEquals(domain.get_option('krb5_realm'),
'EXAMPLE.COM')
self.assertFalse(domain.options.has_key('ldap_uri'))
# Put the LOCAL provider back
domain.add_provider('local', 'id')
# Remove the auth domain and verify that the options
# revert to the backup_list
domain.remove_provider('auth')
self.assertFalse(domain.options.has_key('auth_provider'))
options = domain.list_options()
self.assertTrue(type(options) == dict,
"Options should be a dictionary")
# Ensure that all of the expected defaults are there
for option in backup_list:
self.assertTrue(option in options.keys(),
"Option [%s] missing" %
option)
# Ensure that there aren't any unexpected options listed
for option in options.keys():
self.assertTrue(option in backup_list,
'Option [%s] unexpectedly found' %
option)
# Ensure that the krb5_realm option is now gone
self.assertFalse(domain.options.has_key('krb5_realm'))
# Test removing nonexistent provider - Real
domain.remove_provider('id')
self.assertFalse(domain.options.has_key('id_provider'))
# Test removing nonexistent provider - Bad backend type
# Should pass without complaint
domain.remove_provider('id')
self.assertFalse(domain.options.has_key('id_provider'))
# Test removing nonexistent provider - Bad provider type
# Should pass without complaint
domain.remove_provider('nosuchprovider')
self.assertFalse(domain.options.has_key('nosuchprovider_provider'))
def testGetOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Negative Test - Try to get valid option that is not set
self.assertRaises(SSSDConfig.NoOptionError, domain.get_option, 'max_id')
# Positive Test - Set the above option and get it
domain.set_option('max_id', 10000)
self.assertEqual(domain.get_option('max_id'), 10000)
        # Negative Test - Try to get an invalid option
self.assertRaises(SSSDConfig.NoOptionError, domain.get_option, 'nosuchoption')
def testSetOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive Test
domain.set_option('max_id', 10000)
self.assertEqual(domain.get_option('max_id'), 10000)
# Positive Test - Remove option if value is None
domain.set_option('max_id', None)
self.assertTrue('max_id' not in domain.get_all_options().keys())
# Negative Test - invalid option
self.assertRaises(SSSDConfig.NoOptionError, domain.set_option, 'nosuchoption', 1)
# Negative Test - incorrect type
self.assertRaises(TypeError, domain.set_option, 'max_id', 'a string')
# Positive Test - Coax options to appropriate type
domain.set_option('max_id', '10000')
self.assertEqual(domain.get_option('max_id'), 10000)
domain.set_option('max_id', 30.2)
self.assertEqual(domain.get_option('max_id'), 30)
def testRemoveOption(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive test - Remove unset but valid option
self.assertFalse('max_id' in domain.get_all_options().keys())
domain.remove_option('max_id')
self.assertFalse('max_id' in domain.get_all_options().keys())
# Positive test - Remove unset and unknown option
self.assertFalse('nosuchoption' in domain.get_all_options().keys())
domain.remove_option('nosuchoption')
self.assertFalse('nosuchoption' in domain.get_all_options().keys())
def testSetName(self):
domain = SSSDConfig.SSSDDomain('sssd', self.schema)
# Positive test - Change the name once
        domain.set_name('sssd2')
self.assertEqual(domain.get_name(), 'sssd2')
self.assertEqual(domain.oldname, 'sssd')
# Positive test - Change the name a second time
domain.set_name('sssd3')
self.assertEqual(domain.get_name(), 'sssd3')
self.assertEqual(domain.oldname, 'sssd')
# Negative test - try setting the name to a non-string
self.assertRaises(TypeError,
domain.set_name, 4)
class SSSDConfigTestSSSDConfig(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testInit(self):
# Positive test
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - No Such File
self.assertRaises(IOError,
SSSDConfig.SSSDConfig, "nosuchfile.api.conf", srcdir + "/etc/sssd.api.d")
# Negative Test - Schema is not parsable
self.assertRaises(SSSDConfig.ParsingError,
SSSDConfig.SSSDConfig, srcdir + "/testconfigs/noparse.api.conf", srcdir + "/etc/sssd.api.d")
def testImportConfig(self):
# Positive Test
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Verify that all sections were imported
control_list = [
'sssd',
'nss',
'pam',
'sudo',
'domain/PROXY',
'domain/IPA',
'domain/LOCAL',
'domain/LDAP',
'domain/INVALIDPROVIDER',
'domain/INVALIDOPTION',
]
for section in control_list:
self.assertTrue(sssdconfig.has_section(section),
"Section [%s] missing" %
section)
for section in sssdconfig.sections():
self.assertTrue(section['name'] in control_list)
# Verify that all options were imported for a section
control_list = [
'services',
'reconnection_retries',
'domains',
'debug_timestamps',
'config_file_version']
for option in control_list:
self.assertTrue(sssdconfig.has_option('sssd', option),
"Option [%s] missing from [sssd]" %
option)
for option in sssdconfig.options('sssd'):
if option['type'] in ('empty', 'comment'):
continue
self.assertTrue(option['name'] in control_list,
"Option [%s] unexpectedly found" %
option)
#TODO: Check the types and values of the settings
# Negative Test - Missing config file
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
self.assertRaises(IOError, sssdconfig.import_config, "nosuchfile.conf")
# Negative Test - Invalid config file
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-invalid.conf")
# Negative Test - Invalid config file version
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-badversion.conf")
# Negative Test - No config file version
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
self.assertRaises(SSSDConfig.ParsingError, sssdconfig.import_config, srcdir + "/testconfigs/sssd-noversion.conf")
# Negative Test - Already initialized
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
self.assertRaises(SSSDConfig.AlreadyInitializedError,
sssdconfig.import_config, srcdir + "/testconfigs/sssd-valid.conf")
def testNewConfig(self):
# Positive Test
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
# Check that the defaults were set
control_list = [
'sssd',
'nss',
'pam',
'sudo',
'autofs',
'ssh',
'pac']
for section in control_list:
self.assertTrue(sssdconfig.has_section(section),
"Section [%s] missing" %
section)
for section in sssdconfig.sections():
self.assertTrue(section['name'] in control_list)
control_list = [
'config_file_version',
'services']
for option in control_list:
self.assertTrue(sssdconfig.has_option('sssd', option),
"Option [%s] missing from [sssd]" %
option)
for option in sssdconfig.options('sssd'):
if option['type'] in ('empty', 'comment'):
continue
self.assertTrue(option['name'] in control_list,
"Option [%s] unexpectedly found" %
option)
# Negative Test - Already Initialized
self.assertRaises(SSSDConfig.AlreadyInitializedError, sssdconfig.new_config)
def testWrite(self):
#TODO Write tests to compare output files
pass
def testListActiveServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_active_services)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'nss',
'pam']
active_services = sssdconfig.list_active_services()
for service in control_list:
self.assertTrue(service in active_services,
"Service [%s] missing" %
service)
for service in active_services:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testListInactiveServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_inactive_services)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'sssd',
'sudo']
inactive_services = sssdconfig.list_inactive_services()
for service in control_list:
self.assertTrue(service in inactive_services,
"Service [%s] missing" %
service)
for service in inactive_services:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testListServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - sssdconfig not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_services)
sssdconfig.new_config()
control_list = [
'sssd',
'pam',
'nss',
'sudo',
'autofs',
'ssh',
'pac']
service_list = sssdconfig.list_services()
for service in control_list:
self.assertTrue(service in service_list,
"Service [%s] missing" %
service)
for service in service_list:
self.assertTrue(service in control_list,
"Service [%s] unexpectedly found" %
service)
def testGetService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.get_service, 'sssd')
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
service = sssdconfig.get_service('sssd')
self.assertTrue(isinstance(service, SSSDConfig.SSSDService))
# Verify the contents of this service
self.assertEqual(type(service.get_option('debug_timestamps')), bool)
self.assertFalse(service.get_option('debug_timestamps'))
# Negative Test - No such service
self.assertRaises(SSSDConfig.NoServiceError, sssdconfig.get_service, 'nosuchservice')
# Positive test - Service with invalid option loads
# but ignores the invalid option
service = sssdconfig.get_service('pam')
self.assertFalse(service.options.has_key('nosuchoption'))
def testNewService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.new_service, 'sssd')
sssdconfig.new_config()
# Positive Test
# First need to remove the existing service
sssdconfig.delete_service('sssd')
service = sssdconfig.new_service('sssd')
self.failUnless(service.get_name() in sssdconfig.list_services())
# TODO: check that the values of this new service
# are set to the defaults from the schema
def testDeleteService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.delete_service, 'sssd')
sssdconfig.new_config()
# Positive Test
service = sssdconfig.delete_service('sssd')
def testSaveService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
new_service = SSSDConfig.SSSDService('sssd', sssdconfig.schema)
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.save_service, new_service)
# Positive Test
sssdconfig.new_config()
sssdconfig.save_service(new_service)
# TODO: check that all entries were saved correctly (change a few)
# Negative Test - Type Error
self.assertRaises(TypeError, sssdconfig.save_service, self)
def testActivateService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
service_name = 'sudo'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_service, service_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test - Activate an inactive service
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
sssdconfig.activate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
# Positive test - Activate an active service
# This should succeed
sssdconfig.activate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
# Negative test - Invalid service name
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, 'nosuchservice')
# Negative test - Invalid service name type
self.assertRaises(SSSDConfig.NoServiceError,
sssdconfig.activate_service, self)
def testDeactivateService(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
service_name = 'pam'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
                          sssdconfig.deactivate_service, service_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test -Deactivate an active service
self.assertTrue(service_name in sssdconfig.list_services())
self.assertTrue(service_name in sssdconfig.list_active_services())
self.assertFalse(service_name in sssdconfig.list_inactive_services())
sssdconfig.deactivate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
# Positive test - Deactivate an inactive service
# This should succeed
sssdconfig.deactivate_service(service_name)
self.assertTrue(service_name in sssdconfig.list_services())
self.assertFalse(service_name in sssdconfig.list_active_services())
self.assertTrue(service_name in sssdconfig.list_inactive_services())
# Negative test - Invalid service name
self.assertRaises(SSSDConfig.NoServiceError,
                          sssdconfig.deactivate_service, 'nosuchservice')
# Negative test - Invalid service name type
self.assertRaises(SSSDConfig.NoServiceError,
                          sssdconfig.deactivate_service, self)
def testListActiveDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_active_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'IPA',
'LOCAL']
active_domains = sssdconfig.list_active_domains()
for domain in control_list:
self.assertTrue(domain in active_domains,
"Domain [%s] missing" %
domain)
for domain in active_domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testListInactiveDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_inactive_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'PROXY',
'LDAP',
'INVALIDPROVIDER',
'INVALIDOPTION',
]
inactive_domains = sssdconfig.list_inactive_domains()
for domain in control_list:
self.assertTrue(domain in inactive_domains,
"Domain [%s] missing" %
domain)
for domain in inactive_domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testListDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not Initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.list_domains)
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
control_list = [
'IPA',
'LOCAL',
'PROXY',
'LDAP',
'INVALIDPROVIDER',
'INVALIDOPTION',
]
domains = sssdconfig.list_domains()
for domain in control_list:
self.assertTrue(domain in domains,
"Domain [%s] missing" %
domain)
for domain in domains:
self.assertTrue(domain in control_list,
"Domain [%s] unexpectedly found" %
domain)
def testGetDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.get_domain, 'sssd')
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
domain = sssdconfig.get_domain('IPA')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.assertTrue(domain.active)
domain = sssdconfig.get_domain('LDAP')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.assertFalse(domain.active)
# TODO verify the contents of this domain
self.assertTrue(domain.get_option('ldap_id_use_start_tls'))
# Negative Test - No such domain
self.assertRaises(SSSDConfig.NoDomainError, sssdconfig.get_domain, 'nosuchdomain')
# Positive Test - Domain with unknown provider
# Expected result: Domain is imported, but does not contain the
# unknown provider entry
domain = sssdconfig.get_domain('INVALIDPROVIDER')
self.assertFalse(domain.options.has_key('chpass_provider'))
# Positive Test - Domain with unknown option
# Expected result: Domain is imported, but does not contain the
# unknown option entry
domain = sssdconfig.get_domain('INVALIDOPTION')
self.assertFalse(domain.options.has_key('nosuchoption'))
def testNewDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.new_domain, 'example.com')
sssdconfig.new_config()
# Positive Test
domain = sssdconfig.new_domain('example.com')
self.assertTrue(isinstance(domain, SSSDConfig.SSSDDomain))
self.failUnless(domain.get_name() in sssdconfig.list_domains())
self.failUnless(domain.get_name() in sssdconfig.list_inactive_domains())
# TODO: check that the values of this new domain
# are set to the defaults from the schema
def testDeleteDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.delete_domain, 'IPA')
# Positive Test
sssdconfig.import_config(srcdir + '/testconfigs/sssd-valid.conf')
self.assertTrue('IPA' in sssdconfig.list_domains())
self.assertTrue('IPA' in sssdconfig.list_active_domains())
self.assertTrue(sssdconfig.has_section('domain/IPA'))
sssdconfig.delete_domain('IPA')
self.assertFalse('IPA' in sssdconfig.list_domains())
self.assertFalse('IPA' in sssdconfig.list_active_domains())
self.assertFalse(sssdconfig.has_section('domain/IPA'))
def testSaveDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
# Negative Test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError, sssdconfig.save_domain, 'IPA')
# Positive Test
sssdconfig.new_config()
domain = sssdconfig.new_domain('example.com')
domain.add_provider('ldap', 'id')
domain.set_option('ldap_uri', 'ldap://ldap.example.com')
domain.set_active(True)
sssdconfig.save_domain(domain)
self.assertTrue('example.com' in sssdconfig.list_domains())
self.assertTrue('example.com' in sssdconfig.list_active_domains())
self.assertEqual(sssdconfig.get('domain/example.com', 'ldap_uri'),
'ldap://ldap.example.com')
# Negative Test - Type Error
self.assertRaises(TypeError, sssdconfig.save_domain, self)
# Positive test - Change the domain name and save it
domain.set_name('example.com2')
self.assertEqual(domain.name, 'example.com2')
self.assertEqual(domain.oldname, 'example.com')
sssdconfig.save_domain(domain)
self.assertTrue('example.com2' in sssdconfig.list_domains())
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertTrue(sssdconfig.has_section('domain/example.com2'))
self.assertEqual(sssdconfig.get('domain/example.com2',
'ldap_uri'),
'ldap://ldap.example.com')
self.assertFalse('example.com' in sssdconfig.list_domains())
self.assertFalse('example.com' in sssdconfig.list_active_domains())
self.assertFalse('example.com' in sssdconfig.list_inactive_domains())
self.assertFalse(sssdconfig.has_section('domain/example.com'))
self.assertEqual(domain.oldname, None)
# Positive test - Set the domain inactive and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
domain.set_active(False)
sssdconfig.save_domain(domain)
self.assertFalse('example.com2' in sssdconfig.list_active_domains())
self.assertTrue('example.com2' in sssdconfig.list_inactive_domains())
self.assertEqual(len(sssdconfig.list_active_domains()),
len(activelist)-1)
self.assertEqual(len(sssdconfig.list_inactive_domains()),
len(inactivelist)+1)
# Positive test - Set the domain active and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
domain.set_active(True)
sssdconfig.save_domain(domain)
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertFalse('example.com2' in sssdconfig.list_inactive_domains())
self.assertEqual(len(sssdconfig.list_active_domains()),
len(activelist)+1)
self.assertEqual(len(sssdconfig.list_inactive_domains()),
len(inactivelist)-1)
# Positive test - Set the domain inactive and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
sssdconfig.deactivate_domain(domain.get_name())
self.assertFalse('example.com2' in sssdconfig.list_active_domains())
self.assertTrue('example.com2' in sssdconfig.list_inactive_domains())
self.assertEqual(len(sssdconfig.list_active_domains()),
len(activelist)-1)
self.assertEqual(len(sssdconfig.list_inactive_domains()),
len(inactivelist)+1)
# Positive test - Set the domain active and save it
activelist = sssdconfig.list_active_domains()
inactivelist = sssdconfig.list_inactive_domains()
sssdconfig.activate_domain(domain.get_name())
self.assertTrue('example.com2' in sssdconfig.list_active_domains())
self.assertFalse('example.com2' in sssdconfig.list_inactive_domains())
self.assertEqual(len(sssdconfig.list_active_domains()),
len(activelist)+1)
self.assertEqual(len(sssdconfig.list_inactive_domains()),
len(inactivelist)-1)
# Positive test - Ensure that saved domains retain values
domain.set_option('ldap_krb5_init_creds', True)
domain.set_option('ldap_id_use_start_tls', False)
domain.set_option('ldap_user_search_base',
'cn=accounts, dc=example, dc=com')
self.assertTrue(domain.get_option('ldap_krb5_init_creds'))
self.assertFalse(domain.get_option('ldap_id_use_start_tls'))
self.assertEqual(domain.get_option('ldap_user_search_base'),
'cn=accounts, dc=example, dc=com')
sssdconfig.save_domain(domain)
of = '/tmp/testSaveDomain.out'
# Ensure the output file doesn't exist
try:
os.unlink(of)
except OSError:
pass
# Write out the file
sssdconfig.write(of)
# Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
# Output files should not be readable or writable by
# non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0o177)
# Remove the output file
os.unlink(of)
domain2 = sssdconfig.get_domain('example.com2')
self.assertTrue(domain2.get_option('ldap_krb5_init_creds'))
self.assertFalse(domain2.get_option('ldap_id_use_start_tls'))
def testActivateDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
domain_name = 'PROXY'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.activate_domain, domain_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test - Activate an inactive domain
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
sssdconfig.activate_domain('PROXY')
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
# Positive test - Activate an active domain
# This should succeed
sssdconfig.activate_domain('PROXY')
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
# Negative test - Invalid domain name
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, 'nosuchdomain')
# Negative test - Invalid domain name type
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.activate_domain, self)
def testDeactivateDomain(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
domain_name = 'IPA'
# Negative test - Not initialized
self.assertRaises(SSSDConfig.NotInitializedError,
sssdconfig.deactivate_domain, domain_name)
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Positive test - Deactivate an active domain
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertTrue(domain_name in sssdconfig.list_active_domains())
self.assertFalse(domain_name in sssdconfig.list_inactive_domains())
sssdconfig.deactivate_domain(domain_name)
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
# Positive test - Deactivate an inactive domain
# This should succeed
sssdconfig.deactivate_domain(domain_name)
self.assertTrue(domain_name in sssdconfig.list_domains())
self.assertFalse(domain_name in sssdconfig.list_active_domains())
self.assertTrue(domain_name in sssdconfig.list_inactive_domains())
# Negative test - Invalid domain name
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.deactivate_domain, 'nosuchdomain')
# Negative test - Invalid domain name type
self.assertRaises(SSSDConfig.NoDomainError,
sssdconfig.deactivate_domain, self)
if __name__ == "__main__":
error = 0
suite = unittest.TestLoader().loadTestsFromTestCase(SSSDConfigTestSSSDService)
res = unittest.TextTestRunner().run(suite)
if not res.wasSuccessful():
error |= 0x1
suite = unittest.TestLoader().loadTestsFromTestCase(SSSDConfigTestSSSDDomain)
res = unittest.TextTestRunner().run(suite)
if not res.wasSuccessful():
error |= 0x2
suite = unittest.TestLoader().loadTestsFromTestCase(SSSDConfigTestSSSDConfig)
res = unittest.TextTestRunner().run(suite)
if not res.wasSuccessful():
error |= 0x4
suite = unittest.TestLoader().loadTestsFromTestCase(SSSDConfigTestValid)
res = unittest.TextTestRunner().run(suite)
if not res.wasSuccessful():
error |= 0x8
suite = unittest.TestLoader().loadTestsFromTestCase(SSSDConfigTestInvalid)
res = unittest.TextTestRunner().run(suite)
if not res.wasSuccessful():
error |= 0x10
sys.exit(error)<|fim▁end|> | 'domains']
|
<|file_name|>MethodCallInstruction.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Created by IntelliJ IDEA.
* User: max
* Date: Jan 26, 2002
* Time: 10:48:52 PM
* To change template for new class use
* Code Style | Class Templates options (Tools | IDE Options).
*/
package com.intellij.codeInspection.dataFlow.instructions;
import com.intellij.codeInspection.dataFlow.*;
import com.intellij.codeInspection.dataFlow.value.DfaValue;
import com.intellij.psi.*;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class MethodCallInstruction extends Instruction {
@Nullable private final PsiCall myCall;
@Nullable private final PsiType myType;
@NotNull private final PsiExpression[] myArgs;
private final boolean myShouldFlushFields;
@NotNull private final PsiElement myContext;
@Nullable private final PsiMethod myTargetMethod;
private final List<MethodContract> myContracts;
private final MethodType myMethodType;
@Nullable private final DfaValue myPrecalculatedReturnValue;
private final boolean myOfNullable;
private final boolean myVarArgCall;
private final Map<PsiExpression, Nullness> myArgRequiredNullability;
private boolean myOnlyNullArgs = true;
private boolean myOnlyNotNullArgs = true;
public enum MethodType {
BOXING, UNBOXING, REGULAR_METHOD_CALL, CAST
}
public MethodCallInstruction(@NotNull PsiExpression context, MethodType methodType, @Nullable PsiType resultType) {
myContext = context;
myContracts = Collections.emptyList();
myMethodType = methodType;
myCall = null;
myArgs = PsiExpression.EMPTY_ARRAY;
myType = resultType;
myShouldFlushFields = false;
myPrecalculatedReturnValue = null;
myTargetMethod = null;
myVarArgCall = false;
myOfNullable = false;
myArgRequiredNullability = Collections.emptyMap();
}
public MethodCallInstruction(@NotNull PsiCall call, @Nullable DfaValue precalculatedReturnValue, List<MethodContract> contracts) {
myContext = call;
myContracts = contracts;
myMethodType = MethodType.REGULAR_METHOD_CALL;
myCall = call;
final PsiExpressionList argList = call.getArgumentList();
myArgs = argList != null ? argList.getExpressions() : PsiExpression.EMPTY_ARRAY;
myType = myCall instanceof PsiCallExpression ? ((PsiCallExpression)myCall).getType() : null;
JavaResolveResult result = call.resolveMethodGenerics();
myTargetMethod = (PsiMethod)result.getElement();
PsiSubstitutor substitutor = result.getSubstitutor();
if (argList != null && myTargetMethod != null) {
PsiParameter[] parameters = myTargetMethod.getParameterList().getParameters();
myVarArgCall = isVarArgCall(myTargetMethod, substitutor, myArgs, parameters);
myArgRequiredNullability = calcArgRequiredNullability(substitutor, parameters);
} else {
myVarArgCall = false;
myArgRequiredNullability = Collections.emptyMap();
}
myShouldFlushFields = !(call instanceof PsiNewExpression && myType != null && myType.getArrayDimensions() > 0) && !isPureCall();
myPrecalculatedReturnValue = precalculatedReturnValue;
myOfNullable = call instanceof PsiMethodCallExpression && DfaOptionalSupport.resolveOfNullable((PsiMethodCallExpression)call) != null;
}
private Map<PsiExpression, Nullness> calcArgRequiredNullability(PsiSubstitutor substitutor, PsiParameter[] parameters) {
int checkedCount = Math.min(myArgs.length, parameters.length) - (myVarArgCall ? 1 : 0);
Map<PsiExpression, Nullness> map = ContainerUtil.newHashMap();
for (int i = 0; i < checkedCount; i++) {
map.put(myArgs[i], DfaPsiUtil.getElementNullability(substitutor.substitute(parameters[i].getType()), parameters[i]));
}
return map;
}
public static boolean isVarArgCall(PsiMethod method, PsiSubstitutor substitutor, PsiExpression[] args, PsiParameter[] parameters) {
if (!method.isVarArgs()) {
return false;<|fim▁hole|> int argCount = args.length;
int paramCount = parameters.length;
if (argCount > paramCount) {
return true;
}
if (paramCount > 0 && argCount == paramCount) {
PsiType lastArgType = args[argCount - 1].getType();
if (lastArgType != null && !substitutor.substitute(parameters[paramCount - 1].getType()).isAssignableFrom(lastArgType)) {
return true;
}
}
return false;
}
private boolean isPureCall() {
if (myTargetMethod == null) return false;
return ControlFlowAnalyzer.isPure(myTargetMethod);
}
@Nullable
public PsiType getResultType() {
return myType;
}
@NotNull
public PsiExpression[] getArgs() {
return myArgs;
}
public MethodType getMethodType() {
return myMethodType;
}
public boolean shouldFlushFields() {
return myShouldFlushFields;
}
@Nullable
public PsiMethod getTargetMethod() {
return myTargetMethod;
}
public boolean isVarArgCall() {
return myVarArgCall;
}
@Nullable
public Nullness getArgRequiredNullability(@NotNull PsiExpression arg) {
return myArgRequiredNullability.get(arg);
}
public List<MethodContract> getContracts() {
return myContracts;
}
@Override
public DfaInstructionState[] accept(DataFlowRunner runner, DfaMemoryState stateBefore, InstructionVisitor visitor) {
return visitor.visitMethodCall(this, runner, stateBefore);
}
@Nullable
public PsiCall getCallExpression() {
return myCall;
}
@NotNull
public PsiElement getContext() {
return myContext;
}
@Nullable
public DfaValue getPrecalculatedReturnValue() {
return myPrecalculatedReturnValue;
}
public String toString() {
return myMethodType == MethodType.UNBOXING
? "UNBOX"
: myMethodType == MethodType.BOXING
? "BOX" :
"CALL_METHOD: " + (myCall == null ? "null" : myCall.getText());
}
public boolean updateOfNullable(DfaMemoryState memState, DfaValue arg) {
if (!myOfNullable) return false;
if (!memState.isNotNull(arg)) {
myOnlyNotNullArgs = false;
}
if (!memState.isNull(arg)) {
myOnlyNullArgs = false;
}
return true;
}
public boolean isOptionalAlwaysNullProblem() {
return myOfNullable && myOnlyNullArgs;
}
public boolean isOptionalAlwaysNotNullProblem() {
return myOfNullable && myOnlyNotNullArgs;
}
}<|fim▁end|> | }
|
<|file_name|>monitor-list.component.ts<|end_file_name|><|fim▁begin|>import { Component, Input, OnInit } from '@angular/core';
import { MonitorApiService } from '../../services/monitor-api/monitor-api.service';
import { Monitor } from '../../models/monitor';
@Component({
selector: 'app-monitor-list',
templateUrl: './monitor-list.component.html',
styleUrls: ['./monitor-list.component.scss'],
providers: [MonitorApiService]
})
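// Illustrative usage in a parent template (hypothetical host markup, not part of this file):
//   <app-monitor-list></app-monitor-list>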
export class MonitorListComponent implements OnInit {
private isLoading: boolean;
private didLoadingFail: boolean;
private responseStatusCode: any;
private monitors: Monitor[];
private didDeleteMonitorFail: boolean;
private shouldShowDeleteMonitorAlert: boolean;
private deleteMonitorAlertText: string;
constructor(private monitorApiService: MonitorApiService) {
}
ngOnInit() {
this.loadMonitors();
}
private loadMonitors() {
this.isLoading = true;
this.monitorApiService
.getMonitors()
.subscribe(data => {
this.finishLoading(data);
}, err => {
this.onLoadError(err);
});
}
private finishLoading(data) {
this.isLoading = false;
this.didLoadingFail = false;
this.monitors = data;
}
private onLoadError(err) {
this.isLoading = false;
this.didLoadingFail = true;
this.responseStatusCode = err.status;
}
private hasMonitors(): boolean {
return this.monitors && this.monitors.length > 0;
}
private shouldListMonitors(): boolean {
return !this.isLoading
&& !this.didLoadingFail
&& this.monitors
&& this.monitors.length > 0;
}
private getIsActiveText(monitor: Monitor): string {
return monitor.isActive === true ? 'Yes' : 'No';
}
private deleteMonitor(monitor: Monitor) {
if (confirm('Are you sure you want to delete this monitor?')) {
this.monitorApiService
.deleteMonitor(monitor._id)
.subscribe(data => {
console.log('deleted');
this.shouldShowDeleteMonitorAlert = true;
this.didDeleteMonitorFail = false;
this.deleteMonitorAlertText = 'Monitor deleted successfully.';
this.loadMonitors();
}, err => {
this.shouldShowDeleteMonitorAlert = true;
this.didDeleteMonitorFail = true;
this.deleteMonitorAlertText = 'Error occurred when deleting monitor.';
});
}
}
<|fim▁hole|> && this.deleteMonitorAlertText !== undefined
&& this.deleteMonitorAlertText.length > 0;
}
}<|fim▁end|> | private shouldDisplayDeleteMonitorAlertText(): boolean {
return this.shouldShowDeleteMonitorAlert === true
&& this.deleteMonitorAlertText !== null |
<|file_name|>0006_auto_20160303_2138.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20160303_2340'),
('manager', '0005_auto_20160303_2008'),
]
operations = [
migrations.RenameField(
model_name='setting',
old_name='unit',
new_name='repetition_unit',
),
migrations.RenameField(
model_name='workoutlog',
old_name='unit',
new_name='repetition_unit',
),
migrations.AddField(
model_name='setting',
name='weight_unit',
field=models.ForeignKey(verbose_name='Unit', to='core.WeightUnit', default=1),
),<|fim▁hole|> migrations.AddField(
model_name='workoutlog',
name='weight_unit',
field=models.ForeignKey(verbose_name='Unit', to='core.WeightUnit', default=1),
),
]<|fim▁end|> | |
<|file_name|>create_init_file.py<|end_file_name|><|fim▁begin|>## Package for various utility functions to execute build and shell commands
#
# @copyright (c) 2007 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# [email protected]
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.<|fim▁hole|># You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# @author Malte Marquarding <[email protected]>
#
from __future__ import with_statement
import os
def create_init_file(name, env):
'''
Create package initialization file. e.g. init_package_env.sh
:param name: file name to be created.
:type name: string
:param env: the environment object.
:type env: :class:`.Environment`
:return: None
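Example (illustrative only; assumes ``env`` is the dict-like
Environment object with the ``ld_prefix`` attribute used below)::
create_init_file('init_package_env.sh', env)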
'''
aroot = os.environ["ASKAP_ROOT"]
inittxt = """#
# ASKAP auto-generated file
#
ASKAP_ROOT=%s
export ASKAP_ROOT
""" % aroot
vartxt = """if [ "${%(key)s}" != "" ]
then
%(key)s=%(value)s:${%(key)s}
else
%(key)s=%(value)s
fi
export %(key)s
"""
with open(name, 'w') as initfile:
initfile.write(inittxt)
for k, v in env.items():
if not v:
continue
if k == "LD_LIBRARY_PATH":
k = env.ld_prefix+k
v = v.replace(aroot, '${ASKAP_ROOT}')
initfile.write(vartxt % { 'key': k, 'value': v } )<|fim▁end|> | # |
<|file_name|>rich-textarea-field-response.js<|end_file_name|><|fim▁begin|>import React from "react";
import PropTypes from "prop-types";
import styled from "@emotion/styled";<|fim▁hole|>
const RichTextareaFieldResponseWrapper = styled("div")(props => ({
// TODO: fluid video
lineHeight: "1.3rem",
"& img": {
maxWidth: "100%",
margin: "16px 0 0 0",
},
"& ul": {
marginTop: 0,
marginBottom: "16px",
fontWeight: "normal",
},
"& p": {
fontFamily: props.theme.text.bodyFontFamily,
marginTop: 0,
marginBottom: "16px",
fontWeight: "normal",
},
"& li": {
fontFamily: props.theme.text.bodyFontFamily,
fontWeight: "normal",
},
"& a": {
textDecoration: "none",
fontWeight: "normal",
},
"& h1,h2,h3,h4,h5,h6": {
fontFamily: props.theme.text.titleFontFamily,
margin: "16px 0 8px 0",
},
}));
const RichTextareaFieldResponse = props => {
return (
<RichTextareaFieldResponseWrapper>
<div
className="rich-textarea-field-response"
dangerouslySetInnerHTML={{
__html: insertEmbeddedImages(props.value, props.attachments),
}}
/>
</RichTextareaFieldResponseWrapper>
);
};
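// Illustrative usage (the attachments shape is assumed, not defined here):
//   <RichTextareaFieldResponse value={someHtml} attachments={[]} />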
RichTextareaFieldResponse.propTypes = {
attachments: PropTypes.array,
value: PropTypes.string.isRequired,
};
export default RichTextareaFieldResponse;<|fim▁end|> |
import { insertEmbeddedImages } from "../../../utils/embedded-images"; |
<|file_name|>number.ts<|end_file_name|><|fim▁begin|>import Vue from 'vue';
Vue.filter('number', (n) => {
return n.toLocaleString();<|fim▁hole|>});<|fim▁end|> | |
<|file_name|>QuotaChecker.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2017 Yahoo Holdings, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms;
import java.util.List;
import com.yahoo.athenz.zms.store.ObjectStoreConnection;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.rdl.Timestamp;
class QuotaChecker {
private final Quota defaultQuota;
private boolean quotaCheckEnabled;
public QuotaChecker() {
// first check if the quota check is enabled or not
quotaCheckEnabled = Boolean.parseBoolean(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_CHECK, "true"));
// retrieve default quota values
int roleQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_ROLE, "1000"));
int roleMemberQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_ROLE_MEMBER, "100"));
int policyQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_POLICY, "1000"));
int assertionQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_ASSERTION, "100"));
int serviceQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_SERVICE, "250"));
int serviceHostQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_SERVICE_HOST, "10"));
int publicKeyQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_PUBLIC_KEY, "100"));
int entityQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_ENTITY, "100"));
int subDomainQuota = Integer.parseInt(System.getProperty(ZMSConsts.ZMS_PROP_QUOTA_SUBDOMAIN, "100"));
defaultQuota = new Quota().setName("server-default")
.setAssertion(assertionQuota).setEntity(entityQuota)
.setPolicy(policyQuota).setPublicKey(publicKeyQuota)
.setRole(roleQuota).setRoleMember(roleMemberQuota)
.setService(serviceQuota).setServiceHost(serviceHostQuota)
.setSubdomain(subDomainQuota).setModified(Timestamp.fromCurrentTime());
}
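// Illustrative JVM overrides (the concrete property keys live in ZMSConsts;
// the names and values shown here are hypothetical):
//   -D<value of ZMS_PROP_QUOTA_ROLE>=2000    raise the default per-domain role quota
//   -D<value of ZMS_PROP_QUOTA_CHECK>=false  disable quota enforcement entirely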
public Quota getDomainQuota(ObjectStoreConnection con, String domainName) {
Quota quota = con.getQuota(domainName);
return (quota == null) ? defaultQuota : quota;
}
void setQuotaCheckEnabled(boolean quotaCheckEnabled) {
this.quotaCheckEnabled = quotaCheckEnabled;
}
int getListSize(List<?> list) {
return (list == null) ? 0 : list.size();
}
void checkSubdomainQuota(ObjectStoreConnection con, String domainName, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// for sub-domains we need to run the quota check against
// the top level domain so let's get that first. If we are
// creating a top level domain then there is no need for
// quota check
int idx = domainName.indexOf('.');
if (idx == -1) {
return;
}
final String topLevelDomain = domainName.substring(0, idx);
// now get the quota for the top level domain
final Quota quota = getDomainQuota(con, topLevelDomain);
// get the list of sub-domains for our given top level domain
final String domainPrefix = topLevelDomain + ".";
int objectCount = con.listDomains(domainPrefix, 0).size() + 1;
if (quota.getSubdomain() < objectCount) {
throw ZMSUtils.quotaLimitError("subdomain quota exceeded - limit: "
+ quota.getSubdomain() + " actual: " + objectCount, caller);
}
}
void checkRoleQuota(ObjectStoreConnection con, String domainName, Role role, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// if our role is null then there is no quota check
if (role == null) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// first we're going to verify the elements that do not
// require any further data from the object store
int objectCount = getListSize(role.getRoleMembers());
if (quota.getRoleMember() < objectCount) {
throw ZMSUtils.quotaLimitError("role member quota exceeded - limit: "
+ quota.getRoleMember() + " actual: " + objectCount, caller);
}
// now we're going to check if we'll be allowed
// to create this role in the domain
objectCount = con.countRoles(domainName) + 1;
if (quota.getRole() < objectCount) {
throw ZMSUtils.quotaLimitError("role quota exceeded - limit: "
+ quota.getRole() + " actual: " + objectCount, caller);
}
}
void checkRoleMembershipQuota(ObjectStoreConnection con, String domainName,
String roleName, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// now check to make sure we can add 1 more member
// to this role without exceeding the quota
int objectCount = con.countRoleMembers(domainName, roleName) + 1;
if (quota.getRoleMember() < objectCount) {
throw ZMSUtils.quotaLimitError("role member quota exceeded - limit: "
+ quota.getRoleMember() + " actual: " + objectCount, caller);
}
}
void checkPolicyQuota(ObjectStoreConnection con, String domainName, Policy policy, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// if our policy is null then there is no quota check
if (policy == null) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// first we're going to verify the elements that do not
// require any further data from the object store
int objectCount = getListSize(policy.getAssertions());
if (quota.getAssertion() < objectCount) {<|fim▁hole|> }
// now we're going to check if we'll be allowed
// to create this policy in the domain
objectCount = con.countPolicies(domainName) + 1;
if (quota.getPolicy() < objectCount) {
throw ZMSUtils.quotaLimitError("policy quota exceeded - limit: "
+ quota.getPolicy() + " actual: " + objectCount, caller);
}
}
void checkPolicyAssertionQuota(ObjectStoreConnection con, String domainName,
String policyName, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// now check to make sure we can add 1 more assertion
// to this policy without exceeding the quota
int objectCount = con.countAssertions(domainName, policyName) + 1;
if (quota.getAssertion() < objectCount) {
throw ZMSUtils.quotaLimitError("policy assertion quota exceeded - limit: "
+ quota.getAssertion() + " actual: " + objectCount, caller);
}
}
void checkServiceIdentityQuota(ObjectStoreConnection con, String domainName,
ServiceIdentity service, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// if our service is null then there is no quota check
if (service == null) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// first we're going to verify the elements that do not
// require any further data from the object store
int objectCount = getListSize(service.getHosts());
if (quota.getServiceHost() < objectCount) {
throw ZMSUtils.quotaLimitError("service host quota exceeded - limit: "
+ quota.getServiceHost() + " actual: " + objectCount, caller);
}
objectCount = getListSize(service.getPublicKeys());
if (quota.getPublicKey() < objectCount) {
throw ZMSUtils.quotaLimitError("service public key quota exceeded - limit: "
+ quota.getPublicKey() + " actual: " + objectCount, caller);
}
// now we're going to check if we'll be allowed
// to create this service in the domain
objectCount = con.countServiceIdentities(domainName) + 1;
if (quota.getService() < objectCount) {
throw ZMSUtils.quotaLimitError("service quota exceeded - limit: "
+ quota.getService() + " actual: " + objectCount, caller);
}
}
void checkServiceIdentityPublicKeyQuota(ObjectStoreConnection con, String domainName,
String serviceName, String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// now check to make sure we can add 1 more public key
// to this policy without exceeding the quota
int objectCount = con.countPublicKeys(domainName, serviceName) + 1;
if (quota.getPublicKey() < objectCount) {
throw ZMSUtils.quotaLimitError("service public key quota exceeded - limit: "
+ quota.getPublicKey() + " actual: " + objectCount, caller);
}
}
void checkEntityQuota(ObjectStoreConnection con, String domainName, Entity entity,
String caller) {
// if quota check is disabled we have nothing to do
if (!quotaCheckEnabled) {
return;
}
// if our entity is null then there is no quota check
if (entity == null) {
return;
}
// first retrieve the domain quota
final Quota quota = getDomainQuota(con, domainName);
// we're going to check if we'll be allowed
// to create this entity in the domain
int objectCount = con.countEntities(domainName) + 1;
if (quota.getEntity() < objectCount) {
throw ZMSUtils.quotaLimitError("entity quota exceeded - limit: "
+ quota.getEntity() + " actual: " + objectCount, caller);
}
}
}<|fim▁end|> | throw ZMSUtils.quotaLimitError("policy assertion quota exceeded - limit: "
+ quota.getAssertion() + " actual: " + objectCount, caller); |
<|file_name|>exec.go<|end_file_name|><|fim▁begin|>package exec
import (
"bytes"
"fmt"
"io"
"os"
osexec "os/exec"<|fim▁hole|> "github.com/zimmski/backup"
)
// Combined executes a command with given arguments and returns the combined output
func Combined(name string, args ...string) (string, error) {
if backup.Verbose {
fmt.Fprintln(os.Stderr, "Execute: ", name, strings.Join(args, " "))
}
cmd := osexec.Command(name, args...)
out, err := cmd.CombinedOutput()
return string(out), err
}
// CombinedWithDirectOutput executes a command with given arguments and prints (to StdOut) and returns the combined output
func CombinedWithDirectOutput(name string, args ...string) (string, error) {
if backup.Verbose {
fmt.Fprintln(os.Stderr, "Execute: ", name, strings.Join(args, " "))
}
cmd := osexec.Command(name, args...)
var buf bytes.Buffer
out := io.MultiWriter(os.Stdout, &buf)
cmd.Stderr = out
cmd.Stdout = out
err := cmd.Run()
return buf.String(), err
}
// Command returns a generic exec command
func Command(name string, args ...string) *osexec.Cmd {
if backup.Verbose {
fmt.Fprintln(os.Stderr, "Execute: ", name, strings.Join(args, " "))
}
return osexec.Command(name, args...)
}<|fim▁end|> | "strings"
|
<|file_name|>eggie.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. <|fim▁hole|># this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()<|fim▁end|> | #2. Redistributions in binary form must reproduce the above copyright notice, |
<|file_name|>macros.rs<|end_file_name|><|fim▁begin|>//! Macros.
/// If the input is an error, prints it SMT-LIB-style and panics.
#[macro_export]
macro_rules! expect {
($e:expr => |$err:pat| $($action:tt)*) => (
match $e {
Ok(res) => res,
Err($err) => {
$crate::errors::print_err(
& { $($action)* }.into()
) ;
panic!("Fatal internal error, please contact the developper")
}
}
) ;
($e:expr) => (
expect! {
$e => |e| e
}
) ;
}
/// Fails with some message, SMT-LIB-style.
#[macro_export]
macro_rules! fail_with {
( $($head:expr),* $(,)* $( ; $($blah:expr),* $(,)* )* $(;)* ) => ({
let err: Res<()> = Err(
format!($($head),*).into()
) ;
$(
let err = err.chain_err(
|| format!( $($blah),* )
) ;
)*
expect!(err) ;
unreachable!()
}) ;
}
/// Bails with unsat.
///
/// Logs unsat (`@info`) and the input message if any (`@debug`).
#[macro_export]
macro_rules! unsat {
($($stuff:tt)*) => ({
log! { @info "unsat" } ;
log! { @debug $($stuff)* } ;
bail!($crate::errors::ErrorKind::Unsat)
}) ;
}
/// Bails with unknown.
#[macro_export]
macro_rules! unknown {
($($stuff:tt)*) => ({
log! { @debug $($stuff)* } ;
bail!($crate::errors::ErrorKind::Unknown)
}) ;
}
/// Wraps stuff in a block, usually to please borrow-checking.
#[macro_export]
macro_rules! scoped {
($($tokens:tt)*) => ({
$($tokens)*
})
}
/// Chains some errors and bails.
#[macro_export]
macro_rules! err_chain {
($head:expr $(=> $tail:expr)*) => ({
let mut err: Error = $head.into() ;
$(
err = err.chain_err(|| $tail) ;
)*
bail!(err)
})
}
/// Guards something by a log level, inactive in bench mode.
#[cfg(not(feature = "bench"))]
#[macro_export]
macro_rules! if_log {
( @$flag:tt then { $($then:tt)* } else { $($else:tt)* } ) => (
if log!(|cond_of| $flag) { $($then)* } else { $($else)* }
) ;
(@$flag:tt $($stuff:tt)*) => (
if_log! { @$flag then { $($stuff)* } else { () } }
) ;
}
#[cfg(feature = "bench")]
#[macro_export]
macro_rules! if_log {
( @$flag:tt then { $($then:tt)* } else { $($else:tt)* } ) => (
$($else)*
) ;
(@$flag:tt $($stuff:tt)*) => (()) ;
}
/** Logging macro, inactive in bench mode.
| Log level | active when... | prefix (`_` are spaces) |
|-----------|--------------------|:------------------------------------|
| `@0` | `true` | `";_"` |
| `@warn` | `true` | `";_Warning_|_"` |
| `@info` | `conf.verb ≥ 1` | `";_"` |
| `@verb` | `conf.verb ≥ 2` | `";___"` |
| `@debug` | `conf.verb ≥ 3` | `";_____"` |
| `@<i>` | `conf.verb ≥ <i>` | `";_"` followed by `<i> * 2` spaces |
*/
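// Illustrative calls (messages are hypothetical; `conf` is this crate's global config):
//   log! { @info "unsat" } ;
//   log! { @debug "found {} candidates", n } ;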
#[macro_export]
#[cfg(not(feature = "bench"))]
macro_rules! log {
(|pref_of| debug) => ( log!(|pref_of| 2) ) ;
(|pref_of| verb) => ( log!(|pref_of| 1) ) ;
(|pref_of| info) => ( log!(|pref_of| 0) ) ;
(|pref_of| warn) => ( format!("{}Warning | ", log!(|pref_of| 0)) ) ;
(|pref_of| 0) => ( format!("; ") ) ;
(|pref_of| $int:expr) => (
format!("; {:width$}", "", width = ($int - 1) * 2)
) ;
(|cond_of| debug) => ( log!(|cond_of| 3) ) ;
(|cond_of| verb) => ( log!(|cond_of| 2) ) ;
(|cond_of| info) => ( log!(|cond_of| 1) ) ;
(|cond_of| warn) => ( true ) ;
(|cond_of| 0) => (true) ;
(|cond_of| $int:expr) => (conf.verb >= $int) ;
( $cond:expr, $op:tt @$flag:tt $($tail:tt)* ) => (
if $cond $op log!(|cond_of| $flag) {
log! { log!(|pref_of| $flag) => $($tail)* }
}
) ;
( @$flag:tt |=> $($tail:tt)* ) => (
log! { > log!(|pref_of| $flag) => $($tail)* }
) ;
( @$flag:tt | $($tail:tt)* ) => (
if log!(|cond_of| $flag) {
log! { > log!(|pref_of| $flag) => $($tail)* }
}
) ;
( @$flag:tt => $($tail:tt)* ) => (
log! { log!(|pref_of| $flag) => $($tail)* }
) ;
( @$flag:tt $($tail:tt)* ) => (
if log!(|cond_of| $flag) {
log! { log!(|pref_of| $flag) => $($tail)* }
}
) ;
( > $pref:expr => $( $str:expr $(, $args:expr)* $(,)* );* ) => ({
$(
println!("{}{}", $pref, format!($str $(, $args)*)) ;
)*
()
}) ;
( > $( $str:expr $(, $args:expr)* $(,)* );* ) => ({
$(
println!("; {}", format!($str $(, $args)*))
)*
()
}) ;
( $pref:expr => $( $str:expr $(, $args:expr)* $(,)* );* ) => ({
$(
for line in format!($str $(, $args)*).lines() {
if line != "" {
println!("{}{}", $pref, line)
} else {
println!()
}
}
)*
()
}) ;
( $( $str:expr $(, $args:expr)* $(,)* );* ) => ({
$(
for line in format!($str $(, $args)*).lines() {
if line != "" {
println!("; {}", line)
} else {
println!()
}
}
)*
()
}) ;
}
#[cfg(feature = "bench")]
macro_rules! log {
(|pref_of| $($stuff:tt)*) => {
""
};
(|cond_of| $($stuff:tt)*) => {
false
};
($($stuff:tt)*) => {
()
};
}
/// Same as `log! { @verb ... }`.
#[macro_export]
macro_rules! log_verb {
( $($stuff:tt)* ) => (
log! { @verb $($stuff)* }
) ;
}
/// Same as `log! { @info ... }`.
#[macro_export]
macro_rules! log_info {
( $($stuff:tt)* ) => (
log! { @info $($stuff)* }
) ;
}
/// Same as `log! { @debug ... }`.
#[macro_export]
#[allow(unused_macros)]
macro_rules! log_debug {
( $($stuff:tt)* ) => (
log! { @debug $($stuff)* }
) ;
}
/// Prints a warning SMT-LIB-style.
///
/// **Active in bench mode.**
#[macro_export]
macro_rules! warn {
( $( $str:expr $(, $args:expr)* $(,)* );* ) => ({
println!(
"; {}", $crate::common::conf.sad("|===| Warning:")
) ;
$(
print!("; {} ", $crate::common::conf.sad("|")) ;
println!( $str $(, $args)* ) ;
)*
println!("; {}", $crate::common::conf.sad("|===|"))
}) ;
}
/// Does something if not in bench mode.
#[macro_export]
#[cfg(not(feature = "bench"))]
macro_rules! if_not_bench {
( then { $($then:tt)* } else { $($else:tt)* } ) => (
$($then)*
) ;
($($stuff:tt)*) => (
if_not_bench! {
then { $($stuff)* } else { () }
}
) ;
}
/// Does something if not in bench mode.
#[cfg(feature = "bench")]
macro_rules! if_not_bench {
( then { $($then:tt)* } else { $($else:tt)* } ) => (
$($else)*
) ;
($($stuff:tt)*) => (()) ;
}
/// Same as `if_log! { @verb ... }`.
#[macro_export]
macro_rules! if_verb {
($($stuff:tt)*) => ( if_log! { @verb $($stuff)* } ) ;
}
/// Same as `if_log! { @debug ... }` .
#[macro_export]
macro_rules! if_debug {
($($stuff:tt)*) => ( if_log! { @debug $($stuff)* } ) ;
}
/// Profiling macro, inactive in bench mode.
///
/// If passed `self`, assumes `self` has a `_profiler` field.
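///
/// Illustrative call (scope names are hypothetical):
/// `profile! { self tick "learning", "qualifiers" }`.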
#[macro_export]
#[cfg(not(feature = "bench"))]
macro_rules! profile {
( | $stuff:ident $(. $prof:ident)* |
wrap $b:block $( $scope:expr ),+ $(,)*
) => ({
profile! { | $stuff $(. $prof)* | tick $($scope),+ }
let res = $b ;
profile! { | $stuff $(. $prof)* | mark $($scope),+ }
res
}) ;
( | $stuff:ident $(. $prof:ident)* | $stat:expr => add $e:expr ) => (<|fim▁hole|> $stuff$(.$prof)*.stat_do( $stat, |val| val + $e )
}
) ;
( | $stuff:ident $(. $prof:ident)* | $stat:expr => set $e:expr ) => (
if conf.stats {
$stuff$(.$prof)*.stat_do( $stat, |val| val + $e )
}
) ;
( | $stuff:ident $(. $prof:ident)* |
$meth:ident $( $scope:expr ),+ $(,)*
) => (
if conf.stats {
$stuff$(.$prof)*.$meth(
vec![ $($scope),+ ]
)
}
) ;
( $slf:ident wrap $b:block $( $scope:expr ),+ $(,)* ) => (
profile! { |$slf._profiler| wrap $b $($scope),+ }
) ;
( $slf:ident $stat:expr => add $e:expr ) => (
profile!{ |$slf._profiler| $stat => add $e }
) ;
( $slf:ident $meth:ident $( $scope:expr ),+ $(,)* ) => (
profile!{ |$slf._profiler| $meth $($scope),+ }
) ;
}
#[cfg(feature = "bench")]
macro_rules! profile {
( | $stuff:ident $(. $prof:ident)* |
wrap $b:block $( $scope:expr ),+ $(,)*
) => {
$b
};
( $slf:ident
wrap $b:block $( $scope:expr ),+ $(,)*
) => {
$b
};
( $($stuff:tt)* ) => {
()
};
}
/// Messaging macro, compiled to nothing in bench mode.
#[cfg(not(feature = "bench"))]
#[macro_export]
macro_rules! msg {
( @$flag:tt $slf:expr => $($tt:tt)* ) => (
if log!(|cond_of| $flag) {
msg!( force $slf => $($tt)* )
}
) ;
( debug $slf:expr => $($tt:tt)* ) => (
if conf.verb >= 3 {
msg!( force $slf => $($tt)* )
}
) ;
( force $slf:expr => $e:tt ) => (
$slf.msg($e) ? ;
) ;
( force $slf:expr => $($tt:tt)* ) => (
$slf.msg( format!( $($tt)* ) ) ? ;
) ;
( $core:expr => $e:expr ) => (
if_debug!( $core.msg($e) ? )
) ;
( $slf:expr => $($tt:tt)* ) => (
msg!{ $slf => format!( $($tt)* ) }
) ;
}
#[macro_export]
#[cfg(feature = "bench")]
macro_rules! msg {
( $($tt:tt)* ) => {
()
};
}
/// Yields the value if the type (int or bool) matches, otherwise
/// `return`s `Ok(Val::N)`.
///
/// ```rust
/// use hoice::term::Val ;
/// use hoice::errors ;
///
/// fn int(val: Val) -> Res<Val> {
/// Ok( try_val!{ int val } )
/// }
/// fn bool(val: Val) -> Res<Val> {
/// Ok( try_val!{ bool val } )
/// }
///
/// let none = Val::N ;
///
/// let val: Val = 7.into() ;
/// assert_eq!{ int( val.clone() ), val }
/// assert_eq!{ bool( val.clone() ), none }
///
/// let val: Val = false.into() ;
/// assert_eq!{ int( val.clone() ), none }
/// assert_eq!{ bool( val.clone() ), val }
///
/// assert_eq!{ int( none.clone() ), none }
/// assert_eq!{ bool( none.clone() ), none }
/// ```
macro_rules! try_val {
(int $e:expr) => {
if let Some(i) = $e.to_int()? {
i
} else {
return Ok($crate::val::none($crate::term::typ::int()));
}
};
(real $e:expr) => {
if let Some(r) = $e.to_real()? {
r
} else {
return Ok($crate::val::none($crate::term::typ::real()));
}
};
(bool $e:expr) => {
if let Some(b) = $e.to_bool()? {
b
} else {
return Ok($crate::val::none($crate::term::typ::bool()));
}
};
}
/// Dumps an instance if the `PreprocConf` flag says so.
macro_rules! preproc_dump {
($instance:expr => $file:expr, $blah:expr) => {
if let Some(mut file) = conf.preproc.instance_log_file($file, &$instance)? {
$instance.dump_as_smt2(&mut file, $blah)
} else {
Ok(())
}
};
}
/// `Int` writer.
#[macro_export]
macro_rules! int_to_smt {
($writer:expr, $i:expr) => {
if $i.is_negative() {
write!($writer, "(- {})", -$i)
} else {
write!($writer, "{}", $i)
}
};
}
/// `Rat` writer.
#[macro_export]
macro_rules! rat_to_smt {
($writer:expr, $r:expr) => {{
let (num, den) = ($r.numer(), $r.denom());
debug_assert!(!den.is_negative());
if num.is_zero() {
write!($writer, "0.0")
} else if !num.is_negative() {
if den.is_one() {
write!($writer, "{}.0", num)
} else {
write!($writer, "(/ {} {})", num, den)
}
} else {
if den.is_one() {
write!($writer, "(- {}.0)", -num)
} else {
write!($writer, "(- (/ {} {}))", -num, den)
}
}
}};
}
/// Test macros
#[cfg(test)]
#[macro_use]
mod test {
/// Turns a sequence of values into a `Cex` (`VarMap<Val>`).
#[macro_export]
macro_rules! model {
( $($values:expr),* ) => (
$crate::common::VarMap::of(
vec![ $( $values ),* ]
)
) ;
}
/// Checks that the result of an evaluation yields the correct value.
///
/// Prints information before the check.
#[macro_export]
macro_rules! assert_eval {
( int $model:expr => $expr:expr, $value:expr ) => {{
let res = $expr.eval(&$model).unwrap().to_int().unwrap().unwrap();
println!(
"{} evaluated with {} is {}, expecting {}",
$expr, $model, res, $value
);
assert_eq!(res, $value.into())
}};
( real $model:expr => $expr:expr, $value:expr ) => {{
let res = $expr.eval(&$model).unwrap().to_real().unwrap().unwrap();
println!(
"{} evaluated with {} is {}, expecting {}",
$expr, $model, res, $value
);
assert_eq!(res, rat_of_float($value))
}};
( bool not $model:expr => $expr:expr ) => {{
let res = $expr.eval(&$model).unwrap().to_bool().unwrap().unwrap();
println!(
"{} evaluated with {} is {}, expecting false",
$expr, $model, res
);
assert!(!res)
}};
( bool $model:expr => $expr:expr ) => {{
let res = $expr.eval(&$model).unwrap().to_bool().unwrap().unwrap();
println!(
"{} evaluated with {} is {}, expecting true",
$expr, $model, res
);
assert!(res)
}};
}
}
/// Creates some values for some variables.
///
/// Used in tests.
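///
/// Illustrative: `r_var_vals! { (int 7) (bool true) }` builds an `RVarVals`
/// holding an integer and a boolean value.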
#[macro_export]
macro_rules! r_var_vals {
(@make($vec:expr) (int $e:expr) $($tail:tt)*) => ({
$vec.push( $crate::val::int($e) );
r_var_vals!(@make($vec) $($tail)*)
});
(@make($vec:expr) (real $e:expr) $($tail:tt)*) => ({
$vec.push( $crate::val::real_of($e as f64) );
r_var_vals!(@make($vec) $($tail)*)
});
(@make($vec:expr) (bool $e:expr) $($tail:tt)*) => ({
$vec.push( $crate::val::bool($e) );
r_var_vals!(@make($vec) $($tail)*)
});
(@make($vec:expr) ($e:expr) $($tail:tt)*) => ({
$vec.push( $e );
r_var_vals!(@make($vec) $($tail)*)
});
(@make($vec:expr)) => (());
($($stuff:tt)*) => ({
let mut vec = vec![];
r_var_vals! { @make(vec) $($stuff)* }
let vals: $crate::var_to::vals::RVarVals = vec.into();
vals
});
}
/// Creates some values for some variables (hash-consed).
#[macro_export]
macro_rules! var_vals {
($($stuff:tt)*) => ({
let r_var_vals = r_var_vals!($($stuff)*);
$crate::var_to::vals::new(r_var_vals)
});
}<|fim▁end|> | if conf.stats { |
<|file_name|>buffer.rs<|end_file_name|><|fim▁begin|>/// Memory buffers for the benefit of `std::io::net` which has slow read/write.
use std::io::{Reader, Writer, Stream};
use std::cmp::min;
use std::vec;
// 64KB chunks (moderately arbitrary)
static READ_BUF_SIZE: uint = 0x10000;
static WRITE_BUF_SIZE: uint = 0x10000;
// TODO: consider removing constants and giving a buffer size in the constructor
pub struct BufferedStream<T> {
wrapped: T,
read_buffer: ~[u8],
// The current position in the buffer
read_pos: uint,
// The last valid position in the reader
read_max: uint,
write_buffer: ~[u8],
write_len: uint,
writing_chunked_body: bool,
}
impl<T: Stream> BufferedStream<T> {
pub fn new(stream: T) -> BufferedStream<T> {
let mut read_buffer = vec::with_capacity(READ_BUF_SIZE);
unsafe { read_buffer.set_len(READ_BUF_SIZE); }
let mut write_buffer = vec::with_capacity(WRITE_BUF_SIZE);
unsafe { write_buffer.set_len(WRITE_BUF_SIZE); }
BufferedStream {
wrapped: stream,
read_buffer: read_buffer,
read_pos: 0u,
read_max: 0u,
write_buffer: write_buffer,
write_len: 0u,
writing_chunked_body: false,
}
}
}
impl<T: Reader> BufferedStream<T> {
/// Poke a single byte back so it will be read next. For this to make sense, you must have just
/// read that byte. If `self.pos` is 0 and `self.max` is not 0 (i.e. if the buffer is just
/// filled), the byte cannot be poked back and the call will fail.
/// Very great caution must be used in calling this as it will fail if `self.pos` is 0.
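///
/// Illustrative sketch (assuming a byte was just read): after
/// `let b = stream.read_byte().unwrap();`, calling `stream.poke_byte(b)`
/// makes the next `read_byte` return `b` again.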
pub fn poke_byte(&mut self, byte: u8) {
match (self.read_pos, self.read_max) {
(0, 0) => self.read_max = 1,
(0, _) => fail!("poke called when buffer is full"),
(_, _) => self.read_pos -= 1,
}
self.read_buffer[self.read_pos] = byte;
}
#[inline]
fn fill_buffer(&mut self) -> bool {
assert_eq!(self.read_pos, self.read_max);
match self.wrapped.read(self.read_buffer) {
None => {
self.read_pos = 0;
self.read_max = 0;
false
},
Some(i) => {
self.read_pos = 0;
self.read_max = i;
true
},
}
}
/// Slightly faster implementation of read_byte than that which is provided by ReaderUtil
/// (which just uses `read()`)
#[inline]
pub fn read_byte(&mut self) -> Option<u8> {
if self.read_pos == self.read_max && !self.fill_buffer() {<|fim▁hole|> self.read_pos += 1;
Some(self.read_buffer[self.read_pos - 1])
}
}
impl<T: Writer> BufferedStream<T> {
/// Finish off writing a response: this flushes the writer and in case of chunked
/// Transfer-Encoding writes the ending zero-length chunk to indicate completion.
///
/// At the time of calling this, headers MUST have been written, including the
/// ending CRLF, or else an invalid HTTP response may be written.
pub fn finish_response(&mut self) {
self.flush();
if self.writing_chunked_body {
self.wrapped.write(bytes!("0\r\n\r\n"));
}
}
}
impl<T: Reader> Reader for BufferedStream<T> {
/// Read at most N bytes into `buf`, where N is the minimum of `buf.len()` and the buffer size.
///
/// At present, this makes no attempt to fill its buffer proactively, instead waiting until you
/// ask.
fn read(&mut self, buf: &mut [u8]) -> Option<uint> {
if self.read_pos == self.read_max && !self.fill_buffer() {
// Run out of buffered content, no more to come
return None;
}
let size = min(self.read_max - self.read_pos, buf.len());
vec::bytes::copy_memory(buf, self.read_buffer.slice_from(self.read_pos).slice_to(size));
self.read_pos += size;
Some(size)
}
/// Return whether the Reader has reached the end of the stream AND exhausted its buffer.
fn eof(&mut self) -> bool {
self.read_pos == self.read_max && self.wrapped.eof()
}
}
impl<T: Writer> Writer for BufferedStream<T> {
fn write(&mut self, buf: &[u8]) {
if buf.len() + self.write_len > self.write_buffer.len() {
// This is the lazy approach which may involve multiple writes where it's really not
// warranted. Maybe deal with that later.
if self.writing_chunked_body {
let s = format!("{}\r\n", (self.write_len + buf.len()).to_str_radix(16));
self.wrapped.write(s.as_bytes());
}
if self.write_len > 0 {
self.wrapped.write(self.write_buffer.slice_to(self.write_len));
self.write_len = 0;
}
self.wrapped.write(buf);
self.write_len = 0;
if self.writing_chunked_body {
self.wrapped.write(bytes!("\r\n"));
}
} else {
unsafe { self.write_buffer.mut_slice_from(self.write_len).copy_memory(buf); }
self.write_len += buf.len();
if self.write_len == self.write_buffer.len() {
if self.writing_chunked_body {
let s = format!("{}\r\n", self.write_len.to_str_radix(16));
self.wrapped.write(s.as_bytes());
self.wrapped.write(self.write_buffer);
self.wrapped.write(bytes!("\r\n"));
} else {
self.wrapped.write(self.write_buffer);
}
self.write_len = 0;
}
}
}
fn flush(&mut self) {
if self.write_len > 0 {
if self.writing_chunked_body {
let s = format!("{}\r\n", self.write_len.to_str_radix(16));
self.wrapped.write(s.as_bytes());
}
self.wrapped.write(self.write_buffer.slice_to(self.write_len));
if self.writing_chunked_body {
self.wrapped.write(bytes!("\r\n"));
}
self.write_len = 0;
}
self.wrapped.flush();
}
}<|fim▁end|> | // Run out of buffered content, no more to come
return None;
} |