prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k) |
---|---|
<|file_name|>jquery.thumbs.js<|end_file_name|><|fim▁begin|>{
"name": "jquery.thumbs.js",
"url": "https://github.com/nfort/jquery.thumbs.js.git"<|fim▁hole|>}<|fim▁end|> | |
<|file_name|>tcpconnection.cpp<|end_file_name|><|fim▁begin|>//
// tcpconnection.cpp
//
// This implements RFC 793 with some changes from RFC 1122 and RFC 6298.
//
// Non-implemented features:
// dynamic receive window
// URG flag and urgent pointer
// delayed ACK
// queueing out-of-order TCP segments
// security/compartment
// precedence
// user timeout
//
// Circle - A C++ bare metal environment for Raspberry Pi
// Copyright (C) 2015-2021 R. Stange <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
#include <circle/net/tcpconnection.h>
#include <circle/macros.h>
#include <circle/util.h>
#include <circle/logger.h>
#include <circle/net/in.h>
#include <assert.h>
//#define TCP_DEBUG
#define TCP_MAX_CONNECTIONS 1000 // maximum number of active TCP connections
#define MSS_R 1480 // maximum segment size to be received from network layer
#define MSS_S 1480 // maximum segment size to be sent to network layer
#define TCP_CONFIG_MSS (MSS_R - 20)
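// Note: MSS_R/MSS_S match a 1500-byte Ethernet MTU less the 20-byte IPv4 header
// (assumed here); TCP_CONFIG_MSS then subtracts the 20-byte TCP header.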
#define TCP_CONFIG_WINDOW (TCP_CONFIG_MSS * 10)
#define TCP_CONFIG_RETRANS_BUFFER_SIZE 0x10000 // should be greater than maximum send window size
#define TCP_MAX_WINDOW ((u16) -1) // without Window extension option
#define TCP_QUIET_TIME 30 // seconds after crash before another connection starts
#define HZ_TIMEWAIT (60 * HZ)
#define HZ_FIN_TIMEOUT (60 * HZ) // timeout in FIN-WAIT-2 state
#define MAX_RETRANSMISSIONS 5
struct TTCPHeader
{
u16 nSourcePort;
u16 nDestPort;
u32 nSequenceNumber;
u32 nAcknowledgmentNumber;
u16 nDataOffsetFlags; // following #define(s) are valid without BE()
#define TCP_DATA_OFFSET(field) (((field) >> 4) & 0x0F)
#define TCP_DATA_OFFSET_SHIFT 4
//#define TCP_FLAG_NONCE (1 << 0)
//#define TCP_FLAG_CWR (1 << 15)
//#define TCP_FLAG_ECN_ECHO (1 << 14)
#define TCP_FLAG_URGENT (1 << 13)
#define TCP_FLAG_ACK (1 << 12)
#define TCP_FLAG_PUSH (1 << 11)
#define TCP_FLAG_RESET (1 << 10)
#define TCP_FLAG_SYN (1 << 9)
#define TCP_FLAG_FIN (1 << 8)
u16 nWindow;
u16 nChecksum;
u16 nUrgentPointer;
u32 Options[];
}
PACKED;
#define TCP_HEADER_SIZE 20 // valid for normal data segments without TCP options
struct TTCPOption
{
u8 nKind; // Data:
#define TCP_OPTION_END_OF_LIST 0 // None (no length field)
#define TCP_OPTION_NOP 1 // None (no length field)
#define TCP_OPTION_MSS 2 // Maximum segment size (2 byte)
#define TCP_OPTION_WINDOW_SCALE 3 // Shift count (1 byte)
#define TCP_OPTION_SACK_PERM 4 // None
#define TCP_OPTION_TIMESTAMP 8 // Timestamp value, Timestamp echo reply (2*4 byte)
u8 nLength;
u8 Data[];
}
PACKED;
#define min(n, m) ((n) <= (m) ? (n) : (m))
#define max(n, m) ((n) >= (m) ? (n) : (m))
// Modulo-2^32 (32-bit) sequence number arithmetic
#define lt(x, y) ((int) ((u32) (x) - (u32) (y)) < 0)
#define le(x, y) ((int) ((u32) (x) - (u32) (y)) <= 0)
#define gt(x, y) lt (y, x)
#define ge(x, y) le (y, x)
#define bw(l, x, h) (lt ((l), (x)) && lt ((x), (h))) // between
#define bwl(l, x, h) (le ((l), (x)) && lt ((x), (h))) // low border inclusive
#define bwh(l, x, h) (lt ((l), (x)) && le ((x), (h))) // high border inclusive
#define bwlh(l, x, h) (le ((l), (x)) && le ((x), (h))) // both borders inclusive
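// Example: with 32-bit wrap-around, lt (0xFFFFFFF0, 0x00000010) is TRUE, since
// (u32) (0xFFFFFFF0 - 0x00000010) == 0xFFFFFFE0 is negative when cast to int;
// a sequence number just before the wrap thus still compares as "less than"
// one just after it.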
#if !defined (NDEBUG) && defined (TCP_DEBUG)
#define NEW_STATE(state) NewState (state, __LINE__);
#else
#define NEW_STATE(state) (m_State = state)
#endif
#ifndef NDEBUG
#define UNEXPECTED_STATE() UnexpectedState (__LINE__)
#else
#define UNEXPECTED_STATE() ((void) 0)
#endif
unsigned CTCPConnection::s_nConnections = 0;
static const char FromTCP[] = "tcp";
CTCPConnection::CTCPConnection (CNetConfig *pNetConfig,
CNetworkLayer *pNetworkLayer,
CIPAddress &rForeignIP,
u16 nForeignPort,
u16 nOwnPort)
: CNetConnection (pNetConfig, pNetworkLayer, rForeignIP, nForeignPort, nOwnPort, IPPROTO_TCP),
m_bActiveOpen (TRUE),
m_State (TCPStateClosed),
m_nErrno (0),
m_RetransmissionQueue (TCP_CONFIG_RETRANS_BUFFER_SIZE),
m_bRetransmit (FALSE),
m_bSendSYN (FALSE),
m_bFINQueued (FALSE),
m_nRetransmissionCount (0),
m_bTimedOut (FALSE),
m_pTimer (CTimer::Get ()),
m_nSND_WND (TCP_CONFIG_WINDOW),
m_nSND_UP (0),
m_nRCV_NXT (0),
m_nRCV_WND (TCP_CONFIG_WINDOW),
m_nIRS (0),
m_nSND_MSS (536) // RFC 1122 section 4.2.2.6
{
s_nConnections++;
for (unsigned nTimer = TCPTimerUser; nTimer < TCPTimerUnknown; nTimer++)
{
m_hTimer[nTimer] = 0;
}
m_nISS = CalculateISN ();
m_RTOCalculator.Initialize (m_nISS);
m_nSND_UNA = m_nISS;
m_nSND_NXT = m_nISS+1;
if (SendSegment (TCP_FLAG_SYN, m_nISS))
{
m_RTOCalculator.SegmentSent (m_nISS);
NEW_STATE (TCPStateSynSent);
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
StartTimer (TCPTimerRetransmission, m_RTOCalculator.GetRTO ());
}
}
CTCPConnection::CTCPConnection (CNetConfig *pNetConfig,
CNetworkLayer *pNetworkLayer,
u16 nOwnPort)
: CNetConnection (pNetConfig, pNetworkLayer, nOwnPort, IPPROTO_TCP),
m_bActiveOpen (FALSE),
m_State (TCPStateListen),
m_nErrno (0),
m_RetransmissionQueue (TCP_CONFIG_RETRANS_BUFFER_SIZE),
m_bRetransmit (FALSE),
m_bSendSYN (FALSE),
m_bFINQueued (FALSE),
m_nRetransmissionCount (0),
m_bTimedOut (FALSE),
m_pTimer (CTimer::Get ()),
m_nSND_WND (TCP_CONFIG_WINDOW),
m_nSND_UP (0),
m_nRCV_NXT (0),
m_nRCV_WND (TCP_CONFIG_WINDOW),
m_nIRS (0),
m_nSND_MSS (536) // RFC 1122 section 4.2.2.6
{
s_nConnections++;
for (unsigned nTimer = TCPTimerUser; nTimer < TCPTimerUnknown; nTimer++)
{
m_hTimer[nTimer] = 0;
}
}
CTCPConnection::~CTCPConnection (void)
{
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug, "Delete TCB");
#endif
assert (m_State == TCPStateClosed);
for (unsigned nTimer = TCPTimerUser; nTimer < TCPTimerUnknown; nTimer++)
{
StopTimer (nTimer);
}
// ensure no task is waiting any more
m_Event.Set ();
m_TxEvent.Set ();
assert (s_nConnections > 0);
s_nConnections--;
}
int CTCPConnection::Connect (void)
{
if (m_nErrno < 0)
{
return m_nErrno;
}
switch (m_State)
{
case TCPStateSynSent:
case TCPStateSynReceived:
m_Event.Clear ();
m_Event.Wait ();
break;
case TCPStateEstablished:
break;
case TCPStateListen:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
UNEXPECTED_STATE ();
// fall through
case TCPStateClosed:
return -1;
}
return m_nErrno;
}
int CTCPConnection::Accept (CIPAddress *pForeignIP, u16 *pForeignPort)
{
if (m_nErrno < 0)
{
return m_nErrno;
}
switch (m_State)
{
case TCPStateSynSent:
UNEXPECTED_STATE ();
// fall through
case TCPStateClosed:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
return -1;
case TCPStateListen:
m_Event.Clear ();
m_Event.Wait ();
break;
case TCPStateSynReceived:
case TCPStateEstablished:
break;
}
assert (pForeignIP != 0);
pForeignIP->Set (m_ForeignIP);
assert (pForeignPort != 0);
*pForeignPort = m_nForeignPort;
return m_nErrno;
}
int CTCPConnection::Close (void)
{
if (m_nErrno < 0)
{
return m_nErrno;
}
switch (m_State)
{
case TCPStateClosed:
return -1;
case TCPStateListen:
case TCPStateSynSent:
StopTimer (TCPTimerRetransmission);
NEW_STATE (TCPStateClosed);
break;
case TCPStateSynReceived:
case TCPStateEstablished:
assert (!m_bFINQueued);
m_StateAfterFIN = TCPStateFinWait1;
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
m_bFINQueued = TRUE;
break;
case TCPStateFinWait1:
case TCPStateFinWait2:
break;
case TCPStateCloseWait:
assert (!m_bFINQueued);
m_StateAfterFIN = TCPStateLastAck; // RFC 1122 section 4.2.2.20 (a)
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
m_bFINQueued = TRUE;
break;
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
return -1;
}
if (m_nErrno < 0)
{
return m_nErrno;
}
return 0;
}
int CTCPConnection::Send (const void *pData, unsigned nLength, int nFlags)
{
if ( nFlags != 0
&& nFlags != MSG_DONTWAIT)
{
return -1;
}
if (m_nErrno < 0)
{
return m_nErrno;
}
switch (m_State)
{
case TCPStateClosed:
case TCPStateListen:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
return -1;
case TCPStateSynSent:
case TCPStateSynReceived:
case TCPStateEstablished:
case TCPStateCloseWait:
break;
}
unsigned nResult = nLength;
assert (pData != 0);
u8 *pBuffer = (u8 *) pData;
while (nLength > FRAME_BUFFER_SIZE)
{
m_TxQueue.Enqueue (pBuffer, FRAME_BUFFER_SIZE);
pBuffer += FRAME_BUFFER_SIZE;
nLength -= FRAME_BUFFER_SIZE;
}
if (nLength > 0)
{
m_TxQueue.Enqueue (pBuffer, nLength);
}
if (!(nFlags & MSG_DONTWAIT))
{
m_TxEvent.Clear ();
m_TxEvent.Wait ();
if (m_nErrno < 0)
{
return m_nErrno;
}
}
return nResult;
}
int CTCPConnection::Receive (void *pBuffer, int nFlags)
{
if ( nFlags != 0
&& nFlags != MSG_DONTWAIT)
{
return -1;
}
if (m_nErrno < 0)
{
return m_nErrno;
}
unsigned nLength;
while ((nLength = m_RxQueue.Dequeue (pBuffer)) == 0)
{
switch (m_State)
{
case TCPStateClosed:
case TCPStateListen:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
return -1;
case TCPStateSynSent:
case TCPStateSynReceived:
case TCPStateEstablished:
break;
}
if (nFlags & MSG_DONTWAIT)
{
return 0;
}
m_Event.Clear ();
m_Event.Wait ();
if (m_nErrno < 0)
{
return m_nErrno;
}
}
return nLength;
}
int CTCPConnection::SendTo (const void *pData, unsigned nLength, int nFlags,
CIPAddress &rForeignIP, u16 nForeignPort)
{
// ignore rForeignIP and nForeignPort
return Send (pData, nLength, nFlags);
}
int CTCPConnection::ReceiveFrom (void *pBuffer, int nFlags, CIPAddress *pForeignIP, u16 *pForeignPort)
{
int nResult = Receive (pBuffer, nFlags);
if (nResult <= 0)
{
return nResult;
}
if ( pForeignIP != 0
&& pForeignPort != 0)
{
pForeignIP->Set (m_ForeignIP);
*pForeignPort = m_nForeignPort;
}
return 0;
}
int CTCPConnection::SetOptionBroadcast (boolean bAllowed)
{
return 0;
}
boolean CTCPConnection::IsConnected (void) const
{
return m_State > TCPStateSynSent
&& m_State != TCPStateTimeWait;
}
boolean CTCPConnection::IsTerminated (void) const
{
return m_State == TCPStateClosed;
}
void CTCPConnection::Process (void)
{
if (m_bTimedOut)
{
m_nErrno = -1;
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return;
}
switch (m_State)
{
case TCPStateClosed:
case TCPStateListen:
case TCPStateFinWait2:
case TCPStateTimeWait:
return;
case TCPStateSynSent:
case TCPStateSynReceived:
if (m_bSendSYN)
{
m_bSendSYN = FALSE;
if (m_State == TCPStateSynSent)
{
SendSegment (TCP_FLAG_SYN, m_nISS);
}
else
{
SendSegment (TCP_FLAG_SYN | TCP_FLAG_ACK, m_nISS, m_nRCV_NXT);
}
m_RTOCalculator.SegmentSent (m_nISS);
StartTimer (TCPTimerRetransmission, m_RTOCalculator.GetRTO ());
}
return;
case TCPStateEstablished:
case TCPStateFinWait1:
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
if ( m_RetransmissionQueue.IsEmpty ()
&& m_TxQueue.IsEmpty ()
&& m_bFINQueued)
{
SendSegment (TCP_FLAG_FIN | TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
m_RTOCalculator.SegmentSent (m_nSND_NXT);
m_nSND_NXT++;
NEW_STATE (m_StateAfterFIN);
m_bFINQueued = FALSE;
StartTimer (TCPTimerRetransmission, m_RTOCalculator.GetRTO ());
}
break;
}
u8 TempBuffer[FRAME_BUFFER_SIZE];
unsigned nLength;
while ( m_RetransmissionQueue.GetFreeSpace () >= FRAME_BUFFER_SIZE
&& (nLength = m_TxQueue.Dequeue (TempBuffer)) > 0)
{
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug, "Transferring %u bytes into RT buffer", nLength);
#endif
m_RetransmissionQueue.Write (TempBuffer, nLength);
}
// pacing transmit
if ( ( m_State == TCPStateEstablished
|| m_State == TCPStateCloseWait)
&& m_TxQueue.IsEmpty ())
{
m_TxEvent.Set ();
}
if (m_bRetransmit)
{
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug, "Retransmission (nxt %u, una %u)", m_nSND_NXT-m_nISS, m_nSND_UNA-m_nISS);
#endif
m_bRetransmit = FALSE;
m_RetransmissionQueue.Reset ();
m_nSND_NXT = m_nSND_UNA;
}
u32 nBytesAvail;
u32 nWindowLeft;
while ( (nBytesAvail = m_RetransmissionQueue.GetBytesAvailable ()) > 0
&& (nWindowLeft = m_nSND_UNA+m_nSND_WND-m_nSND_NXT) > 0)
{
nLength = min (nBytesAvail, nWindowLeft);
nLength = min (nLength, m_nSND_MSS);
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug, "Transferring %u bytes into TX buffer", nLength);
#endif
assert (nLength <= FRAME_BUFFER_SIZE);
m_RetransmissionQueue.Read (TempBuffer, nLength);
unsigned nFlags = TCP_FLAG_ACK;
if (m_TxQueue.IsEmpty ())
{
nFlags |= TCP_FLAG_PUSH;
}
SendSegment (nFlags, m_nSND_NXT, m_nRCV_NXT, TempBuffer, nLength);
m_RTOCalculator.SegmentSent (m_nSND_NXT, nLength);
m_nSND_NXT += nLength;
StartTimer (TCPTimerRetransmission, m_RTOCalculator.GetRTO ());
}
}
int CTCPConnection::PacketReceived (const void *pPacket,
unsigned nLength,
CIPAddress &rSenderIP,
CIPAddress &rReceiverIP,
int nProtocol)
{
if (nProtocol != IPPROTO_TCP)
{
return 0;
}
if (nLength < sizeof (TTCPHeader))
{
return -1;
}
assert (pPacket != 0);
TTCPHeader *pHeader = (TTCPHeader *) pPacket;
if (m_nOwnPort != be2le16 (pHeader->nDestPort))
{
return 0;
}
if (m_State != TCPStateListen)
{
if ( m_ForeignIP != rSenderIP
|| m_nForeignPort != be2le16 (pHeader->nSourcePort))
{
return 0;
}
}
else
{
if (!(pHeader->nDataOffsetFlags & TCP_FLAG_SYN))
{
return 0;
}
m_Checksum.SetDestinationAddress (rSenderIP);
}
if (m_Checksum.Calculate (pPacket, nLength) != CHECKSUM_OK)
{
return 0;
}
u16 nFlags = pHeader->nDataOffsetFlags;
u32 nDataOffset = TCP_DATA_OFFSET (pHeader->nDataOffsetFlags)*4;
u32 nDataLength = nLength-nDataOffset;
// Current Segment Variables
u32 nSEG_SEQ = be2le32 (pHeader->nSequenceNumber);
u32 nSEG_ACK = be2le32 (pHeader->nAcknowledgmentNumber);
u32 nSEG_LEN = nDataLength;
if (nFlags & TCP_FLAG_SYN)
{
nSEG_LEN++;
}
if (nFlags & TCP_FLAG_FIN)
{
nSEG_LEN++;
}
u32 nSEG_WND = be2le16 (pHeader->nWindow);
//u16 nSEG_UP = be2le16 (pHeader->nUrgentPointer);
//u32 nSEG_PRC; // segment precedence value
ScanOptions (pHeader);
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug,
"rx %c%c%c%c%c%c, seq %u, ack %u, win %u, len %u",
nFlags & TCP_FLAG_URGENT ? 'U' : '-',
nFlags & TCP_FLAG_ACK ? 'A' : '-',
nFlags & TCP_FLAG_PUSH ? 'P' : '-',
nFlags & TCP_FLAG_RESET ? 'R' : '-',
nFlags & TCP_FLAG_SYN ? 'S' : '-',
nFlags & TCP_FLAG_FIN ? 'F' : '-',
nSEG_SEQ-m_nIRS,
nFlags & TCP_FLAG_ACK ? nSEG_ACK-m_nISS : 0,
nSEG_WND,
nDataLength);
DumpStatus ();
#endif
boolean bAcceptable = FALSE;
// RFC 793 section 3.9 "SEGMENT ARRIVES"
switch (m_State)
{
case TCPStateClosed:
if (nFlags & TCP_FLAG_RESET)
{
// ignore
}
else if (!(nFlags & TCP_FLAG_ACK))
{
m_ForeignIP.Set (rSenderIP);
m_nForeignPort = be2le16 (pHeader->nSourcePort);
m_Checksum.SetDestinationAddress (rSenderIP);
SendSegment (TCP_FLAG_RESET | TCP_FLAG_ACK, 0, nSEG_SEQ+nSEG_LEN);
}
else
{
m_ForeignIP.Set (rSenderIP);
m_nForeignPort = be2le16 (pHeader->nSourcePort);
m_Checksum.SetDestinationAddress (rSenderIP);
SendSegment (TCP_FLAG_RESET, nSEG_ACK);
}
break;
case TCPStateListen:
if (nFlags & TCP_FLAG_RESET)
{
// ignore
}
else if (nFlags & TCP_FLAG_ACK)
{
m_ForeignIP.Set (rSenderIP);
m_nForeignPort = be2le16 (pHeader->nSourcePort);
m_Checksum.SetDestinationAddress (rSenderIP);
SendSegment (TCP_FLAG_RESET, nSEG_ACK);
}
else if (nFlags & TCP_FLAG_SYN)
{
if (s_nConnections >= TCP_MAX_CONNECTIONS)
{
m_ForeignIP.Set (rSenderIP);
m_nForeignPort = be2le16 (pHeader->nSourcePort);
m_Checksum.SetDestinationAddress (rSenderIP);
SendSegment (TCP_FLAG_RESET | TCP_FLAG_ACK, 0, nSEG_SEQ+nSEG_LEN);
break;
}
m_nRCV_NXT = nSEG_SEQ+1;
m_nIRS = nSEG_SEQ;
m_nSND_WND = nSEG_WND;
m_nSND_WL1 = nSEG_SEQ;
m_nSND_WL2 = nSEG_ACK;
assert (nSEG_LEN > 0);
if (nDataLength > 0)
{
m_RxQueue.Enqueue ((u8 *) pPacket+nDataOffset, nDataLength);
}
m_nISS = CalculateISN ();
m_RTOCalculator.Initialize (m_nISS);
m_ForeignIP.Set (rSenderIP);
m_nForeignPort = be2le16 (pHeader->nSourcePort);
m_Checksum.SetDestinationAddress (rSenderIP);
SendSegment (TCP_FLAG_SYN | TCP_FLAG_ACK, m_nISS, m_nRCV_NXT);
m_RTOCalculator.SegmentSent (m_nISS);
m_nSND_NXT = m_nISS+1;
m_nSND_UNA = m_nISS;
NEW_STATE (TCPStateSynReceived);
m_Event.Set ();
}
break;
case TCPStateSynSent:
if (nFlags & TCP_FLAG_ACK)
{
if (!bwh (m_nISS, nSEG_ACK, m_nSND_NXT))
{
if (!(nFlags & TCP_FLAG_RESET))
{
SendSegment (TCP_FLAG_RESET, nSEG_ACK);
}
return 1;
}
else if (bwlh (m_nSND_UNA, nSEG_ACK, m_nSND_NXT))
{
bAcceptable = TRUE;
}
}
if (nFlags & TCP_FLAG_RESET)
{
if (bAcceptable)
{
NEW_STATE (TCPStateClosed);
m_bSendSYN = FALSE;
m_nErrno = -1;
m_Event.Set ();
}
break;
}
if ( (nFlags & TCP_FLAG_ACK)
&& !bAcceptable)
{
break;
}
if (nFlags & TCP_FLAG_SYN)
{
m_nRCV_NXT = nSEG_SEQ+1;
m_nIRS = nSEG_SEQ;
if (nFlags & TCP_FLAG_ACK)
{
m_RTOCalculator.SegmentAcknowledged (nSEG_ACK);
if (nSEG_ACK-m_nSND_UNA > 1)
{
m_RetransmissionQueue.Advance (nSEG_ACK-m_nSND_UNA-1);
}
m_nSND_UNA = nSEG_ACK;
}
if (gt (m_nSND_UNA, m_nISS))
{
NEW_STATE (TCPStateEstablished);
m_bSendSYN = FALSE;
StopTimer (TCPTimerRetransmission);
// next transmission starts with this count
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
m_Event.Set ();
// RFC 1122 section 4.2.2.20 (c)
m_nSND_WND = nSEG_WND;
m_nSND_WL1 = nSEG_SEQ;
m_nSND_WL2 = nSEG_ACK;
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
if ( (nFlags & TCP_FLAG_FIN) // other controls?
|| nDataLength > 0)
{
goto StepSix;
}
break;
}
else
{
NEW_STATE (TCPStateSynReceived);
m_bSendSYN = FALSE;
SendSegment (TCP_FLAG_SYN | TCP_FLAG_ACK, m_nISS, m_nRCV_NXT);
m_RTOCalculator.SegmentSent (m_nISS);
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
StartTimer (TCPTimerRetransmission, m_RTOCalculator.GetRTO ());
if ( (nFlags & TCP_FLAG_FIN) // other controls?
|| nDataLength > 0)
{
if (nFlags & TCP_FLAG_FIN)
{
SendSegment (TCP_FLAG_RESET, m_nSND_NXT);
NEW_STATE (TCPStateClosed);
m_nErrno = -1;
m_Event.Set ();
}
if (nDataLength > 0)
{
m_RxQueue.Enqueue ((u8 *) pPacket+nDataOffset, nDataLength);
}
break;
}
}
}
break;
case TCPStateSynReceived:
case TCPStateEstablished:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
// step 1 (check sequence number)
if (m_nRCV_WND > 0)
{
if (nSEG_LEN == 0)
{
if (bwl (m_nRCV_NXT, nSEG_SEQ, m_nRCV_NXT+m_nRCV_WND))
{
bAcceptable = TRUE;
}
}
else
{
if ( bwl (m_nRCV_NXT, nSEG_SEQ, m_nRCV_NXT+m_nRCV_WND)
|| bwl (m_nRCV_NXT, nSEG_SEQ+nSEG_LEN-1, m_nRCV_NXT+m_nRCV_WND))
{
bAcceptable = TRUE;
}
}
}
else
{
if (nSEG_LEN == 0)
{
if (nSEG_SEQ == m_nRCV_NXT)
{
bAcceptable = TRUE;
}
}
}
if ( !bAcceptable
&& m_State != TCPStateSynReceived)
{
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
break;
}
// step 2 (check RST bit)
if (nFlags & TCP_FLAG_RESET)
{
switch (m_State)
{
case TCPStateSynReceived:
m_RetransmissionQueue.Flush ();
if (!m_bActiveOpen)
{
NEW_STATE (TCPStateListen);
return 1;
}
else
{
m_nErrno = -1;
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return 1;
}
break;
case TCPStateEstablished:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
m_nErrno = -1;
m_RetransmissionQueue.Flush ();
m_TxQueue.Flush ();
m_RxQueue.Flush ();
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return 1;
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return 1;
default:
UNEXPECTED_STATE ();
return 1;
}
}
// step 3 (check security and precedence, not supported)
// step 4 (check SYN bit)
if (nFlags & TCP_FLAG_SYN)
{
// RFC 1122 section 4.2.2.20 (e)
if ( m_State == TCPStateSynReceived
&& !m_bActiveOpen)
{
NEW_STATE (TCPStateListen);
return 1;
}
SendSegment (TCP_FLAG_RESET, m_nSND_NXT);
m_nErrno = -1;
m_RetransmissionQueue.Flush ();
m_TxQueue.Flush ();
m_RxQueue.Flush ();
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return 1;
}
// step 5 (check ACK field)
if (!(nFlags & TCP_FLAG_ACK))
{
return 1;
}
switch (m_State)
{
case TCPStateSynReceived:
if (bwlh (m_nSND_UNA, nSEG_ACK, m_nSND_NXT))
{
// RFC 1122 section 4.2.2.20 (f)
m_nSND_WND = nSEG_WND;
m_nSND_WL1 = nSEG_SEQ;
m_nSND_WL2 = nSEG_ACK;
m_nSND_UNA = nSEG_ACK; // got ACK for SYN
m_RTOCalculator.SegmentAcknowledged (nSEG_ACK);
NEW_STATE (TCPStateEstablished);
// next transmission starts with this count
m_nRetransmissionCount = MAX_RETRANSMISSIONS;
}
else
{
SendSegment (TCP_FLAG_RESET, nSEG_ACK);
}
break;
case TCPStateEstablished:
case TCPStateFinWait1:
case TCPStateFinWait2:
case TCPStateCloseWait:
case TCPStateClosing:
if (bwh (m_nSND_UNA, nSEG_ACK, m_nSND_NXT))
{
m_RTOCalculator.SegmentAcknowledged (nSEG_ACK);
unsigned nBytesAck = nSEG_ACK-m_nSND_UNA;
m_nSND_UNA = nSEG_ACK;
if (nSEG_ACK == m_nSND_NXT) // all segments are acknowledged
{
StopTimer (TCPTimerRetransmission);
// next transmission starts with this count<|fim▁hole|> m_nRetransmissionCount = MAX_RETRANSMISSIONS;
}
if ( m_State == TCPStateFinWait1
|| m_State == TCPStateClosing)
{
nBytesAck--; // acknowledged FIN does not count
m_bFINQueued = FALSE;
}
if ( m_State == TCPStateEstablished
&& nBytesAck == 1)
{
nBytesAck--;
}
if (nBytesAck > 0)
{
m_RetransmissionQueue.Advance (nBytesAck);
}
// update send window
if ( lt (m_nSND_WL1, nSEG_SEQ)
|| ( m_nSND_WL1 == nSEG_SEQ
&& le (m_nSND_WL2, nSEG_ACK)))
{
m_nSND_WND = nSEG_WND;
m_nSND_WL1 = nSEG_SEQ;
m_nSND_WL2 = nSEG_ACK;
}
}
else if (le (nSEG_ACK, m_nSND_UNA)) // RFC 1122 section 4.2.2.20 (g)
{
// ignore duplicate ACK ...
// RFC 1122 section 4.2.2.20 (g)
if (bwlh (m_nSND_UNA, nSEG_ACK, m_nSND_NXT))
{
// ... but update send window
if ( lt (m_nSND_WL1, nSEG_SEQ)
|| ( m_nSND_WL1 == nSEG_SEQ
&& le (m_nSND_WL2, nSEG_ACK)))
{
m_nSND_WND = nSEG_WND;
m_nSND_WL1 = nSEG_SEQ;
m_nSND_WL2 = nSEG_ACK;
}
}
}
else if (gt (nSEG_ACK, m_nSND_NXT))
{
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
return 1;
}
switch (m_State)
{
case TCPStateEstablished:
case TCPStateCloseWait:
break;
case TCPStateFinWait1:
if (nSEG_ACK == m_nSND_NXT) // if our FIN is now acknowledged
{
m_RTOCalculator.SegmentAcknowledged (nSEG_ACK);
m_bFINQueued = FALSE;
StopTimer (TCPTimerRetransmission);
NEW_STATE (TCPStateFinWait2);
StartTimer (TCPTimerTimeWait, HZ_FIN_TIMEOUT);
}
else
{
break;
}
// fall through
case TCPStateFinWait2:
if (m_RetransmissionQueue.IsEmpty ())
{
m_Event.Set ();
}
break;
case TCPStateClosing:
if (nSEG_ACK == m_nSND_NXT) // if our FIN is now acknowledged
{
m_RTOCalculator.SegmentAcknowledged (nSEG_ACK);
m_bFINQueued = FALSE;
StopTimer (TCPTimerRetransmission);
NEW_STATE (TCPStateTimeWait);
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
}
break;
default:
UNEXPECTED_STATE ();
break;
}
break;
case TCPStateLastAck:
if (nSEG_ACK == m_nSND_NXT) // if our FIN is now acknowledged
{
m_bFINQueued = FALSE;
NEW_STATE (TCPStateClosed);
m_Event.Set ();
return 1;
}
break;
case TCPStateTimeWait:
if (nSEG_ACK == m_nSND_NXT) // if our FIN is now acknowledged
{
m_bFINQueued = FALSE;
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
}
break;
default:
UNEXPECTED_STATE ();
break;
}
// step 6 (check URG bit, not supported)
StepSix:
// step 7 (process text segment)
if (nSEG_LEN == 0)
{
return 1;
}
switch (m_State)
{
case TCPStateEstablished:
case TCPStateFinWait1:
case TCPStateFinWait2:
if (nSEG_SEQ == m_nRCV_NXT)
{
if (nDataLength > 0)
{
m_RxQueue.Enqueue ((u8 *) pPacket+nDataOffset, nDataLength);
m_nRCV_NXT += nDataLength;
// m_nRCV_WND should be adjusted here (section 3.7)
// following ACK could be piggybacked with data
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
if (nFlags & TCP_FLAG_PUSH)
{
m_Event.Set ();
}
}
}
else
{
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
return 1;
}
break;
case TCPStateSynReceived: // this state not in RFC 793
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
case TCPStateTimeWait:
break;
default:
UNEXPECTED_STATE ();
break;
}
// step 8 (check FIN bit)
if ( m_State == TCPStateClosed
|| m_State == TCPStateListen
|| m_State == TCPStateSynSent)
{
return 1;
}
if (!(nFlags & TCP_FLAG_FIN))
{
return 1;
}
// connection is closing
m_nRCV_NXT++;
SendSegment (TCP_FLAG_ACK, m_nSND_NXT, m_nRCV_NXT);
switch (m_State)
{
case TCPStateSynReceived:
case TCPStateEstablished:
NEW_STATE (TCPStateCloseWait);
m_Event.Set ();
break;
case TCPStateFinWait1:
if (nSEG_ACK == m_nSND_NXT) // if our FIN is now acknowledged
{
m_bFINQueued = FALSE;
StopTimer (TCPTimerRetransmission);
StopTimer (TCPTimerUser);
NEW_STATE (TCPStateTimeWait);
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
}
else
{
NEW_STATE (TCPStateClosing);
}
break;
case TCPStateFinWait2:
StopTimer (TCPTimerRetransmission);
StopTimer (TCPTimerUser);
NEW_STATE (TCPStateTimeWait);
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
break;
case TCPStateCloseWait:
case TCPStateClosing:
case TCPStateLastAck:
break;
case TCPStateTimeWait:
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
break;
default:
UNEXPECTED_STATE ();
break;
}
break;
}
return 1;
}
int CTCPConnection::NotificationReceived (TICMPNotificationType Type,
CIPAddress &rSenderIP,
CIPAddress &rReceiverIP,
u16 nSendPort,
u16 nReceivePort,
int nProtocol)
{
if (nProtocol != IPPROTO_TCP)
{
return 0;
}
if (m_State < TCPStateSynSent)
{
return 0;
}
if ( m_ForeignIP != rSenderIP
|| m_nForeignPort != nSendPort)
{
return 0;
}
assert (m_pNetConfig != 0);
if ( rReceiverIP != *m_pNetConfig->GetIPAddress ()
|| m_nOwnPort != nReceivePort)
{
return 0;
}
m_nErrno = -1;
StopTimer (TCPTimerRetransmission);
NEW_STATE (TCPStateTimeWait);
StartTimer (TCPTimerTimeWait, HZ_TIMEWAIT);
m_Event.Set ();
return 1;
}
boolean CTCPConnection::SendSegment (unsigned nFlags, u32 nSequenceNumber, u32 nAcknowledgmentNumber,
const void *pData, unsigned nDataLength)
{
unsigned nDataOffset = 5;
assert (nDataOffset * 4 == sizeof (TTCPHeader));
if (nFlags & TCP_FLAG_SYN)
{
nDataOffset++;
}
unsigned nHeaderLength = nDataOffset * 4;
unsigned nPacketLength = nHeaderLength + nDataLength; // may wrap
assert (nPacketLength >= nHeaderLength);
assert (nHeaderLength <= FRAME_BUFFER_SIZE);
u8 TxBuffer[FRAME_BUFFER_SIZE];
TTCPHeader *pHeader = (TTCPHeader *) TxBuffer;
pHeader->nSourcePort = le2be16 (m_nOwnPort);
pHeader->nDestPort = le2be16 (m_nForeignPort);
pHeader->nSequenceNumber = le2be32 (nSequenceNumber);
pHeader->nAcknowledgmentNumber = nFlags & TCP_FLAG_ACK ? le2be32 (nAcknowledgmentNumber) : 0;
pHeader->nDataOffsetFlags = (nDataOffset << TCP_DATA_OFFSET_SHIFT) | nFlags;
pHeader->nWindow = le2be16 (m_nRCV_WND);
pHeader->nUrgentPointer = le2be16 (m_nSND_UP);
if (nFlags & TCP_FLAG_SYN)
{
TTCPOption *pOption = (TTCPOption *) pHeader->Options;
pOption->nKind = TCP_OPTION_MSS;
pOption->nLength = 4;
pOption->Data[0] = TCP_CONFIG_MSS >> 8;
pOption->Data[1] = TCP_CONFIG_MSS & 0xFF;
}
if (nDataLength > 0)
{
assert (pData != 0);
memcpy (TxBuffer+nHeaderLength, pData, nDataLength);
}
pHeader->nChecksum = 0; // must be 0 for calculation
pHeader->nChecksum = m_Checksum.Calculate (TxBuffer, nPacketLength);
#ifdef TCP_DEBUG
CLogger::Get ()->Write (FromTCP, LogDebug,
"tx %c%c%c%c%c%c, seq %u, ack %u, win %u, len %u",
nFlags & TCP_FLAG_URGENT ? 'U' : '-',
nFlags & TCP_FLAG_ACK ? 'A' : '-',
nFlags & TCP_FLAG_PUSH ? 'P' : '-',
nFlags & TCP_FLAG_RESET ? 'R' : '-',
nFlags & TCP_FLAG_SYN ? 'S' : '-',
nFlags & TCP_FLAG_FIN ? 'F' : '-',
nSequenceNumber-m_nISS,
nFlags & TCP_FLAG_ACK ? nAcknowledgmentNumber-m_nIRS : 0,
m_nRCV_WND,
nDataLength);
#endif
assert (m_pNetworkLayer != 0);
return m_pNetworkLayer->Send (m_ForeignIP, TxBuffer, nPacketLength, IPPROTO_TCP);
}
void CTCPConnection::ScanOptions (TTCPHeader *pHeader)
{
assert (pHeader != 0);
unsigned nDataOffset = TCP_DATA_OFFSET (pHeader->nDataOffsetFlags)*4;
u8 *pHeaderEnd = (u8 *) pHeader+nDataOffset;
TTCPOption *pOption = (TTCPOption *) pHeader->Options;
while ((u8 *) pOption+2 <= pHeaderEnd)
{
switch (pOption->nKind)
{
case TCP_OPTION_END_OF_LIST:
return;
case TCP_OPTION_NOP:
pOption = (TTCPOption *) ((u8 *) pOption+1);
break;
case TCP_OPTION_MSS:
if ( pOption->nLength == 4
&& (u8 *) pOption+4 <= pHeaderEnd)
{
u32 nMSS = (u16) pOption->Data[0] << 8 | pOption->Data[1];
// RFC 1122 section 4.2.2.6
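// Eff.snd.MSS = min (SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize, where
// SendMSS is the MSS option just received and MMS_S is the maximum transport
// message size the network layer accepts (MSS_S here)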
nMSS = min (nMSS+20, MSS_S) - TCP_HEADER_SIZE - IP_OPTION_SIZE;
if (nMSS >= 10) // self provided sanity check
{
m_nSND_MSS = (u16) nMSS;
}
}
// fall through
default:
pOption = (TTCPOption *) ((u8 *) pOption+pOption->nLength);
break;
}
}
}
u32 CTCPConnection::CalculateISN (void)
{
assert (m_pTimer != 0);
return ( m_pTimer->GetTime () * HZ
+ m_pTimer->GetTicks () % HZ)
* (TCP_MAX_WINDOW / TCP_QUIET_TIME / HZ);
}
void CTCPConnection::StartTimer (unsigned nTimer, unsigned nHZ)
{
assert (nTimer < TCPTimerUnknown);
assert (nHZ > 0);
assert (m_pTimer != 0);
StopTimer (nTimer);
m_hTimer[nTimer] = m_pTimer->StartKernelTimer (nHZ, TimerStub, (void *) (uintptr) nTimer, this);
}
void CTCPConnection::StopTimer (unsigned nTimer)
{
assert (nTimer < TCPTimerUnknown);
assert (m_pTimer != 0);
m_TimerSpinLock.Acquire ();
if (m_hTimer[nTimer] != 0)
{
m_pTimer->CancelKernelTimer (m_hTimer[nTimer]);
m_hTimer[nTimer] = 0;
}
m_TimerSpinLock.Release ();
}
void CTCPConnection::TimerHandler (unsigned nTimer)
{
assert (nTimer < TCPTimerUnknown);
m_TimerSpinLock.Acquire ();
if (m_hTimer[nTimer] == 0) // timer was stopped in the meantime
{
m_TimerSpinLock.Release ();
return;
}
m_hTimer[nTimer] = 0;
m_TimerSpinLock.Release ();
switch (nTimer)
{
case TCPTimerRetransmission:
m_RTOCalculator.RetransmissionTimerExpired ();
if (m_nRetransmissionCount-- == 0)
{
m_bTimedOut = TRUE;
break;
}
switch (m_State)
{
case TCPStateClosed:
case TCPStateListen:
case TCPStateFinWait2:
case TCPStateTimeWait:
UNEXPECTED_STATE ();
break;
case TCPStateSynSent:
case TCPStateSynReceived:
assert (!m_bSendSYN);
m_bSendSYN = TRUE;
break;
case TCPStateEstablished:
case TCPStateCloseWait:
assert (!m_bRetransmit);
m_bRetransmit = TRUE;
break;
case TCPStateFinWait1:
case TCPStateClosing:
case TCPStateLastAck:
assert (!m_bFINQueued);
m_bFINQueued = TRUE;
break;
}
break;
case TCPTimerTimeWait:
NEW_STATE (TCPStateClosed);
break;
case TCPTimerUser:
case TCPTimerUnknown:
assert (0);
break;
}
}
void CTCPConnection::TimerStub (TKernelTimerHandle hTimer, void *pParam, void *pContext)
{
CTCPConnection *pThis = (CTCPConnection *) pContext;
assert (pThis != 0);
unsigned nTimer = (unsigned) (uintptr) pParam;
assert (nTimer < TCPTimerUnknown);
pThis->TimerHandler (nTimer);
}
#ifndef NDEBUG
void CTCPConnection::DumpStatus (void)
{
CLogger::Get ()->Write (FromTCP, LogDebug,
"sta %u, una %u, snx %u, swn %u, rnx %u, rwn %u, fprt %u",
m_State,
m_nSND_UNA-m_nISS,
m_nSND_NXT-m_nISS,
m_nSND_WND,
m_nRCV_NXT-m_nIRS,
m_nRCV_WND,
(unsigned) m_nForeignPort);
}
TTCPState CTCPConnection::NewState (TTCPState State, unsigned nLine)
{
const static char *StateName[] = // must match TTCPState
{
"CLOSED",
"LISTEN",
"SYN-SENT",
"SYN-RECEIVED",
"ESTABLISHED",
"FIN-WAIT-1",
"FIN-WAIT-2",
"CLOSE-WAIT",
"CLOSING",
"LAST-ACK",
"TIME-WAIT"
};
assert (m_State < sizeof StateName / sizeof StateName[0]);
assert (State < sizeof StateName / sizeof StateName[0]);
CLogger::Get ()->Write (FromTCP, LogDebug, "State %s -> %s at line %u", StateName[m_State], StateName[State], nLine);
return m_State = State;
}
void CTCPConnection::UnexpectedState (unsigned nLine)
{
DumpStatus ();
CLogger::Get ()->Write (FromTCP, LogPanic, "Unexpected state %u at line %u", m_State, nLine);
}
#endif<|fim▁end|> | |
<|file_name|>test-localizer.js<|end_file_name|><|fim▁begin|>import globalize from 'globalize';
import configure from '../src/configure';
export default function testLocalizer() {
function getCulture(culture){
return culture ? globalize.findClosestCulture(culture) : globalize.culture()
}
function shortDay(dayOfTheWeek, culture) {<|fim▁hole|> return names[dayOfTheWeek.getDay()];
}
var date = {
formats: {
date: 'd',
time: 't',
default: 'f',
header: 'MMMM yyyy',
footer: 'D',
weekday: shortDay,
dayOfMonth: 'dd',
month: 'MMM',
year: 'yyyy',
decade: 'yyyy',
century: 'yyyy',
},
firstOfWeek(culture) {
culture = getCulture(culture)
return (culture && culture.calendar.firstDay) || 0
},
parse(value, format, culture){
return globalize.parseDate(value, format, culture)
},
format(value, format, culture){
return globalize.format(value, format, culture)
}
}
function formatData(format, _culture){
var culture = getCulture(_culture)
, numFormat = culture.numberFormat
if (typeof format === 'string') {
if (format.indexOf('p') !== -1) numFormat = numFormat.percent
if (format.indexOf('c') !== -1) numFormat = numFormat.currency
}
return numFormat
}
var number = {
formats: {
default: 'D'
},
parse(value, culture) {
return globalize.parseFloat(value, 10, culture)
},
format(value, format, culture){
return globalize.format(value, format, culture)
},
decimalChar(format, culture){
var data = formatData(format, culture)
return data['.'] || '.'
},
precision(format, _culture){
var data = formatData(format, _culture)
if (typeof format === 'string' && format.length > 1)
return parseFloat(format.substr(1))
return data ? data.decimals : null
}
}
configure.setLocalizers({ date, number })
}<|fim▁end|> | let names = getCulture(culture).calendar.days.namesShort; |
<|file_name|>gen_sync.py<|end_file_name|><|fim▁begin|>#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
# <<< imports
# @generated
from dynamics.dynamics.rotating_machine import RotatingMachine
from google.appengine.ext import db<|fim▁hole|>class GenSync(RotatingMachine):
""" Synchronous generator model. A single standard synchronous model is defined for the CIM, with several variations indicated by the 'model type' attribute. This model can be used for all types of synchronous machines (salient pole, solid iron rotor, etc.).
"""
# <<< gen_sync.attributes
# @generated
# >>> gen_sync.attributes
# <<< gen_sync.references
# @generated
# >>> gen_sync.references
# <<< gen_sync.operations
# @generated
# >>> gen_sync.operations
# EOF -------------------------------------------------------------------------<|fim▁end|> | # >>> imports
|
<|file_name|>v1.js<|end_file_name|><|fim▁begin|>/**
* DO NOT EDIT THIS FILE as it will be overwritten by the Pbj compiler.
* @link https://github.com/gdbots/pbjc-php
*
* Returns an array of curies using mixin "gdbots:ncr:mixin:node-marked-as-draft:v1"
* @link http://schemas.triniti.io/json-schema/gdbots/ncr/mixin/node-marked-as-draft/1-0-0.json#<|fim▁hole|><|fim▁end|> | */
export default [
]; |
<|file_name|>resource-cycle2.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Don't leak the unique pointers
use std::cast;
struct U {
a: int,
b: int,
c: *int
}
struct r {
v: U,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
let _v2: ~int = cast::transmute(self.v.c);
}
}
}
fn r(v: U) -> r {
r {
v: v
}
}
struct t(Node);
struct Node {
next: Option<@mut t>,
r: r
}
pub fn main() {<|fim▁hole|> cast::forget(i1);
let i2 = ~0xA;
let i2p = cast::transmute_copy(&i2);
cast::forget(i2);
let u1 = U {a: 0xB, b: 0xC, c: i1p};
let u2 = U {a: 0xB, b: 0xC, c: i2p};
let x1 = @mut t(Node {
next: None,
r: r(u1)
});
let x2 = @mut t(Node {
next: None,
r: r(u2)
});
x1.next = Some(x2);
x2.next = Some(x1);
}
}<|fim▁end|> | unsafe {
let i1 = ~0xA;
let i1p = cast::transmute_copy(&i1); |
<|file_name|>NounPhraseTest.js<|end_file_name|><|fim▁begin|>var Nightmare = require('nightmare')
var NTU = require('./NightmareTestUtils')
const NounPhraseTest = (nightmare, delay) => {
return nightmare
// Can I open the addedit form and make it go away by clicking cancel?
.click('#add-np').wait(delay)
.click('#np-addedit-form #cancel').wait(delay)
.then( res => {return NTU.lookFor(nightmare, '#np-addedit-form', false)})
// If I open the addedit form, enter and a save a noun,
// will the form then go away and can I see the insertNounPhraseCheck mark in the quiz?
/*.then( res => {
return nightmare
.click('#add-np').wait(delay)
.type('#base', 'carrot').wait(delay)
.click('#save-np').wait(delay)
})
.then( res => {return NTU.lookFor(nightmare, '#np-addedit-form', false)})
.then( res => {return NTU.lookFor(nightmare, '#insertNounPhraseCheck', true)})
// Can I open the addedit form via editing and make it go away by clicking cancel?
.then( res => {
return nightmare
.click('#id1').wait(delay)
.click('#cancel').wait(delay)
})
.then( res => {return NTU.lookFor(nightmare, '#np-addedit-form', false)})
// If I open the addedit form via editing, change and a save a noun,
// will the form then go away and can I see the updateNounPhraseCheck mark in the quiz?
.then( res => {
return nightmare
.click('#id1').wait(delay)
.type('#base', 'beaver').wait(delay)
.click('#save-np').wait(delay)
})
.then( res => {return NTU.lookFor(nightmare, '#np-addedit-form', false)})
.then( res => {return NTU.lookFor(nightmare, '#updateNounPhraseCheck', true)})
// If I open the addedit form via editing and delete the noun,
// will the form then go away and can I see the deleteNounPhraseCheck mark in the quiz?
.then( res => {
return nightmare
.click('#id1').wait(delay)
.click('#delete-np').wait(delay)
})
.then( res => {return NTU.lookFor(nightmare, '#np-addedit-form', false)})
.then( res => {return NTU.lookFor(nightmare, '#deleteNounPhraseCheck', true)})
//.then( res => {return NTU.lookFor(nightmare, '#quiz', false)})
.then( res => {
return nightmare
.click('#add-np').wait(delay)
.type('#base', 'carrot').wait(delay)
.click('#save-np').wait(delay)
})*/
// Can I see the examples button?
//.then( res => {return NTU.lookFor(nightmare, '#examples', true)})
// Does it go away after I click it?
//.then( res => {return nightmare.click('#examples')})
//.then( res => {return NTU.lookFor(nightmare, '#examples', false)})
}
<|fim▁hole|><|fim▁end|> | module.exports = NounPhraseTest |
<|file_name|>server.js<|end_file_name|><|fim▁begin|>var express = require( 'express' ),
router = require( './router' );
var app = express();
app.set('port', (process.env.PORT || 3000));
router.define( app );
// Set up port
// ========================================================
app.listen(app.get('port'), function() {
console.log("Node app is running at localhost:" + app.get('port'));<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>NukeDemoController.js<|end_file_name|><|fim▁begin|>define( [
'jquery',
'angular',
'json!nuke/data/dummy_model.json',<|fim▁hole|> 'text!nuke/demo.html'
], function( $, ng, dummyModel, dummyLayout, htmlDemoTemplate ) {
'use strict';
var module = ng.module( 'NukeDemoApp', [ 'nbe' ] )
.run( [ '$templateCache', function( $templateCache ) {
$templateCache.put( 'lib/demo.html', htmlDemoTemplate );
} ] );
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function NukeDemoController( $scope, $timeout ) {
$scope.model = dummyModel;
$scope.layout = dummyLayout;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
module.controller( 'NukeDemoController', [ '$scope', '$timeout', NukeDemoController ] );
///////////////////////////////////////////////////////////////////////////////////////////////////////////
return module;
} );<|fim▁end|> | 'json!nuke/data/dummy_layout.json', |
<|file_name|>params.rs<|end_file_name|><|fim▁begin|>use valico::json_dsl;
use valico::json_schema;
use rustless::server::status;
use rustless::batteries::schemes;
use rustless::{Nesting};<|fim▁hole|>
#[test]
fn it_urldecodes_path_params() {
let app = app!(|api| {
api.prefix("api");
api.get("users/:user_id/messages/:message_id", |endpoint| {
endpoint.params(|params| {
params.req("user_id", |user_id| {
user_id.allow_values(&["100/200".to_string()])
})
});
endpoint.handle(|client, params| {
client.text(format!("{}", params.find("message_id").and_then(|obj| { obj.as_str() }).unwrap()))
})
})
});
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100%2F200/messages/a%2Fb%3F").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
assert_eq!(resp_body!(response), "a/b?");
}
#[test]
fn it_urldecodes_query_params() {
let app = app!(|api| {
api.prefix("api");
api.get("users", |endpoint| {
endpoint.params(|params| {
params.req("user_id", |user_id| {
user_id.allow_values(&["100&200".to_string()])
})
});
endpoint.handle(|client, params| {
client.text(format!("{}", params.find("message_id").and_then(|obj| { obj.as_str() }).unwrap()))
})
})
});
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users?user_id=100%26200&message_id=a%26b%3F").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
assert_eq!(resp_body!(response), "a&b?");
}
#[test]
fn it_validates_endpoint_simple_path_params() {
let app = app!(|api| {
api.prefix("api");
api.get("users/:user_id/messages/:message_id", |endpoint| {
edp_stub_handler!(endpoint)
})
});
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::NotFound);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::NotFound);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/Skywalker/messages/100").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/messages/12").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
}
#[test]
fn it_validates_typed_endpoint_path_params() {
let app = app!(|api| {
api.prefix("api");
api.get("users/:user_id/messages/:message_id", |endpoint| {
endpoint.params(|params| {
params.req_typed("user_id", json_dsl::u64());
params.req_typed("message_id", json_dsl::u64());
});
edp_stub_handler!(endpoint)
})
});
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/Skywalker/messages/100").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/messages/Skywalker").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/messages/12").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
}
#[test]
fn it_validates_query_params() {
let app = app!(|api| {
api.prefix("api");
api.get("users/:user_id", |endpoint| {
endpoint.params(|params| {
params.req_typed("user_id", json_dsl::u64());
params.req("profile", |profile| {
profile.allow_values(&["simple".to_string(), "full".to_string()])
})
});
edp_stub_handler!(endpoint)
})
});
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100?profile=1").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100?profile=fulll").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100?profile=full").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100?profile=simple").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
}
#[test]
fn it_validates_common_namespace_params() {
let app = app!(|api| {
api.prefix("api");
api.resources("users/:user_id", |users| {
users.params(|params| {
// one parameter goes from path and one from query-string or body
params.req_typed("user_id", json_dsl::u64());
params.req_typed("ext", json_dsl::string());
});
users.get("profile/:profile", |endpoint| {
endpoint.params(|params| {
params.req("profile", |profile| {
profile.allow_values(&["simple".to_string(), "full".to_string()])
})
});
edp_stub_handler!(endpoint)
})
})
});
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/profile/full").err().unwrap();
// missed `ext` param
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/profile/full?ext=some").ok().unwrap();
println!("{}", resp_body!(response));
assert_eq!(response.status, status::StatusCode::Ok);
}
#[test]
fn it_validates_params_with_json_schema() {
let mut app = app!(|api| {
api.prefix("api");
api.resources("users/:user_id", |users| {
users.params(|params| {
// one parameter goes from path and one from query-string or body
params.req("user_id", |user_id| {
user_id.coerce(json_dsl::u64());
user_id.schema(|schema| {
schema.maximum(1000f64, false);
})
});
params.schema(|schema| {
schema.max_properties(1);
});
});
users.get("profile/:profile", |endpoint| {
endpoint.params(|params| {
params.req("profile", |profile| {
profile.schema(|schema| {
schema.enum_(|values| {
values.push("full".to_string());
values.push("short".to_string());
})
})
})
});
edp_stub_handler!(endpoint)
})
})
});
schemes::enable_schemes(&mut app, json_schema::Scope::new()).unwrap();
let response = call_app!(app, Get, "http://127.0.0.1:3000/api/users/100/profile/full").ok().unwrap();
assert_eq!(response.status, status::StatusCode::Ok);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/1001/profile/full").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/1000/profile/wrong").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
let err_resp = call_app!(app, Get, "http://127.0.0.1:3000/api/users/1000/profile/full?one_more=1").err().unwrap();
assert_eq!(err_resp.response.status, status::StatusCode::BadRequest);
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON.
jsonpickle can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.
The object must be accessible globally via a module and must
inherit from object (AKA new-style classes).
Create an object::
class Thing(object):
def __init__(self, name):
self.name = name
obj = Thing('Awesome')
Use jsonpickle to transform the object into a JSON string::
import jsonpickle
frozen = jsonpickle.encode(obj)
Use jsonpickle to recreate a Python object from a JSON string::
thawed = jsonpickle.decode(frozen)
.. warning::
Loading a JSON string from an untrusted source represents a potential
security vulnerability. jsonpickle makes no attempt to sanitize the input.
The new object has the same type and data, but essentially is now a copy of
the original.
.. code-block:: python
assert obj.name == thawed.name
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON::
oneway = jsonpickle.encode(obj, unpicklable=False)
result = jsonpickle.decode(oneway)
assert obj.name == result['name'] == 'Awesome'
"""
import sys, os
from music21 import common
sys.path.append(common.getSourceFilePath() + os.path.sep + 'ext')
from jsonpickle import pickler
from jsonpickle import unpickler
from jsonpickle.backend import JSONBackend
from jsonpickle.version import VERSION
# ensure built-in handlers are loaded
__import__('jsonpickle.handlers')
__all__ = ('encode', 'decode')
__version__ = VERSION
json = JSONBackend()
# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
enable_fallthrough = json.enable_fallthrough
def encode(value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
backend=None,
warn=False,
max_iter=None):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
>>> encode('my string')
'"my string"'
>>> encode(36)
'36'
>>> encode({'foo': True})
'{"foo": true}'
>>> encode({'foo': True}, max_depth=0)
'"{\\'foo\\': True}"'
>>> encode({'foo': True}, max_depth=1)
'{"foo": "True"}'
"""
if backend is None:
backend = json
return pickler.encode(value,
backend=backend,
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
max_depth=max_depth,
warn=warn)
def decode(string, backend=None, keys=False):
"""Convert a JSON string into a Python object.
The keyword argument 'keys' defaults to False.
If set to True then jsonpickle will decode non-string dictionary keys
into python objects via the jsonpickle protocol.
>>> str(decode('"my string"'))
'my string'
>>> decode('36')
36
"""
if backend is None:
backend = json
return unpickler.decode(string, backend=backend, keys=keys)
<|fim▁hole|>dumps = encode
loads = decode<|fim▁end|> | # json.load(),loads(), dump(), dumps() compatibility |
<|file_name|>test_services.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
import os_traits
import six
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova import utils
class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
compute_driver = 'fake.SmallFakeDriver'
def test_compute_service_delete_ensure_related_cleanup(self):
"""Tests deleting a compute service and the related cleanup associated
with that like the compute_nodes table entry, removing the host
from any aggregates, the host mapping in the API DB and the associated
resource provider in Placement.
"""
compute = self._start_compute('host1')
# Make sure our compute host is represented as expected.
services = self.admin_api.get_services(binary='nova-compute')
self.assertEqual(1, len(services))
service = services[0]
# Now create a host aggregate and add our host to it.
aggregate = self.admin_api.post_aggregate(
{'aggregate': {'name': 'agg1'}})
self.admin_api.add_host_to_aggregate(aggregate['id'], service['host'])
# Make sure the host is in the aggregate.
aggregate = self.admin_api.api_get(
'/os-aggregates/%s' % aggregate['id']).body['aggregate']
self.assertEqual([service['host']], aggregate['hosts'])
rp_uuid = self._get_provider_uuid_by_host(service['host'])
# We'll know there is a host mapping implicitly if os-hypervisors
# returned something in _get_provider_uuid_by_host, but let's also
# make sure the host mapping is there like we expect.
ctxt = nova_context.get_admin_context()
objects.HostMapping.get_by_host(ctxt, service['host'])
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Make sure the resource provider has inventory.
inventories = self._get_provider_inventory(rp_uuid)
# Expect a minimal set of inventory for the fake virt driver.
for resource_class in [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB]:
self.assertIn(resource_class, inventories)
# Now create a server so that the resource provider has some allocation
# records.
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, service['host'])
# Now the fun part, delete the compute service and make sure related
# resources are cleaned up, like the compute node, host mapping, and
# resource provider. We have to first stop the compute service so
# it doesn't recreate the compute node during the
# update_available_resource periodic task.
self.admin_api.put_service(service['id'], {'forced_down': True})
compute.stop()
# The first attempt should fail since there is an instance on the
# compute host.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertIn('Unable to delete compute service that is hosting '
'instances.', six.text_type(ex))
self.assertEqual(409, ex.response.status_code)
# Now delete the instance and wait for it to be gone.
self._delete_and_check_allocations(server)
# Now we can delete the service.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# Make sure the service is deleted.
services = self.admin_api.get_services(binary='nova-compute')
self.assertEqual(0, len(services))
# Make sure the host was removed from the aggregate.
aggregate = self.admin_api.api_get(
'/os-aggregates/%s' % aggregate['id']).body['aggregate']
self.assertEqual([], aggregate['hosts'])
# Trying to get the hypervisor should result in a 404.
self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % service['host'],
check_response_status=[404])
# The host mapping should also be gone.
self.assertRaises(exception.HostMappingNotFound,
objects.HostMapping.get_by_host,
ctxt, service['host'])
# And finally, the resource provider should also be gone. The API
# will perform a cascading delete of the resource provider inventory
# and allocation information.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(404, resp.status)
def test_evacuate_then_delete_compute_service(self):
"""Tests a scenario where a server is created on a host, the host
goes down, the server is evacuated to another host, and then the
source host compute service is deleted. After that the deleted
compute service is restarted. Related placement resources are checked
throughout.
"""
# Create our source host that we will evacuate *from* later.
host1 = self._start_compute('host1')
# Create a server which will go on host1 since it is the only host.
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, 'host1')
# Get the compute service record for host1 so we can manage it.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# Get the corresponding resource provider uuid for host1.
rp_uuid = self._get_provider_uuid_by_host(service['host'])
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Down the compute service for host1 so we can evacuate from it.
self.admin_api.put_service(service['id'], {'forced_down': True})
host1.stop()
# Start another host and trigger the server evacuate to that host.
self._start_compute('host2')
self.admin_api.post_server_action(server['id'], {'evacuate': {}})
# The host does not change until after the status is changed to ACTIVE
# so wait for both parameters.
self._wait_for_server_parameter(server, {
'status': 'ACTIVE',
'OS-EXT-SRV-ATTR:host': 'host2'})
# Delete the compute service for host1 and check the related
# placement resources for that host.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# Make sure the service is gone.
services = self.admin_api.get_services(
binary='nova-compute', host='host1')
self.assertEqual(0, len(services), services)
# FIXME(mriedem): This is bug 1829479 where the compute service is
# deleted but the resource provider is not because there are still
# allocations against the provider from the evacuated server.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(rp_uuid, flavor)
# Try to restart the host1 compute service to create a new resource
# provider.
self.restart_compute_service(host1)
# FIXME(mriedem): This is bug 1817833 where restarting the now-deleted
# compute service attempts to create a new resource provider with a
# new uuid but the same name which results in a conflict. The service
# does not die, however, because _update_available_resource_for_node
# catches and logs but does not re-raise the error.
log_output = self.stdlog.logger.output
self.assertIn('Error updating resources for node host1.', log_output)
self.assertIn('Failed to create resource provider host1', log_output)
def test_migrate_confirm_after_deleted_source_compute(self):
"""Tests a scenario where a server is cold migrated and while in
VERIFY_RESIZE status the admin attempts to delete the source compute
and then the user tries to confirm the resize.
"""
# Start a compute service and create a server there.
self._start_compute('host1')
host1_rp_uuid = self._get_provider_uuid_by_host('host1')
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, 'host1')
# Start a second compute service so we can cold migrate there.
self._start_compute('host2')
host2_rp_uuid = self._get_provider_uuid_by_host('host2')
# Cold migrate the server to host2.
self._migrate_and_check_allocations(
server, flavor, host1_rp_uuid, host2_rp_uuid)
# Delete the source compute service.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# We expect the delete request to fail with a 409 error because of the
# instance in VERIFY_RESIZE status even though that instance is marked
# as being on host2 now.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertEqual(409, ex.response.status_code)
self.assertIn('Unable to delete compute service that has in-progress '
'migrations', six.text_type(ex))
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor)
# Now try to confirm the migration.
self._confirm_resize(server)
# Delete the host1 service since the migration is confirmed and the
# server is on host2.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# The host1 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(404, resp.status)
def test_resize_revert_after_deleted_source_compute(self):
"""Tests a scenario where a server is resized and while in
VERIFY_RESIZE status the admin attempts to delete the source compute
and then the user tries to revert the resize.
"""
# Start a compute service and create a server there.
self._start_compute('host1')<|fim▁hole|> server = self._boot_and_check_allocations(flavor1, 'host1')
# Start a second compute service so we can resize there.
self._start_compute('host2')
host2_rp_uuid = self._get_provider_uuid_by_host('host2')
# Resize the server to host2.
self._resize_and_check_allocations(
server, flavor1, flavor2, host1_rp_uuid, host2_rp_uuid)
# Delete the source compute service.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# We expect the delete request to fail with a 409 error because of the
# instance in VERIFY_RESIZE status even though that instance is marked
# as being on host2 now.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertEqual(409, ex.response.status_code)
self.assertIn('Unable to delete compute service that has in-progress '
'migrations', six.text_type(ex))
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
# Now revert the resize.
self._revert_resize(server)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
zero_flavor = {'vcpus': 0, 'ram': 0, 'disk': 0, 'extra_specs': {}}
self.assertFlavorMatchesUsage(host2_rp_uuid, zero_flavor)
# Delete the host2 service since the migration is reverted and the
# server is on host1 again.
service2 = self.admin_api.get_services(
binary='nova-compute', host='host2')[0]
self.admin_api.api_delete('/os-services/%s' % service2['id'])
# The host2 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host2_rp_uuid)
self.assertEqual(404, resp.status)
class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests the API, compute service and Placement interaction with the
    COMPUTE_STATUS_DISABLED trait when a compute service is enabled or disabled.
This version of the test uses the 2.latest microversion for testing the
2.53+ behavior of the PUT /os-services/{service_id} API.
"""
compute_driver = 'fake.SmallFakeDriver'
def _update_service(self, service, disabled, forced_down=None):
"""Update the service using the 2.53 request schema.
:param service: dict representing the service resource in the API
:param disabled: True if the service should be disabled, False if the
service should be enabled
:param forced_down: Optionally change the forced_down value.
"""
status = 'disabled' if disabled else 'enabled'
req = {'status': status}
if forced_down is not None:
req['forced_down'] = forced_down
self.admin_api.put_service(service['id'], req)
def test_compute_status_filter(self):
"""Tests the compute_status_filter placement request filter"""
# Start a compute service so a compute node and resource provider is
# created.
compute = self._start_compute('host1')
# Get the UUID of the resource provider that was created.
rp_uuid = self._get_provider_uuid_by_host('host1')
# Get the service from the compute API.
services = self.admin_api.get_services(binary='nova-compute',
host='host1')
self.assertEqual(1, len(services))
service = services[0]
# At this point, the service should be enabled and the
# COMPUTE_STATUS_DISABLED trait should not be set on the
# resource provider in placement.
self.assertEqual('enabled', service['status'])
rp_traits = self._get_provider_traits(rp_uuid)
trait = os_traits.COMPUTE_STATUS_DISABLED
self.assertNotIn(trait, rp_traits)
# Now disable the compute service via the API.
self._update_service(service, disabled=True)
# The update to placement should be synchronous so check the provider
# traits and COMPUTE_STATUS_DISABLED should be set.
rp_traits = self._get_provider_traits(rp_uuid)
self.assertIn(trait, rp_traits)
# Try creating a server which should fail because nothing is available.
networks = [{'port': self.neutron.port_1['id']}]
server_req = self._build_server(networks=networks)
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ERROR')
# There should be a NoValidHost fault recorded.
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
# Now enable the service and the trait should be gone.
self._update_service(service, disabled=False)
rp_traits = self._get_provider_traits(rp_uuid)
self.assertNotIn(trait, rp_traits)
# Try creating another server and it should be OK.
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(server, 'ACTIVE')
# Stop, force-down and disable the service so the API cannot call
# the compute service to sync the trait.
compute.stop()
self._update_service(service, disabled=True, forced_down=True)
# The API should have logged a message about the service being down.
self.assertIn('Compute service on host host1 is down. The '
'COMPUTE_STATUS_DISABLED trait will be synchronized '
'when the service is restarted.',
self.stdlog.logger.output)
# The trait should not be on the provider even though the node is
# disabled.
rp_traits = self._get_provider_traits(rp_uuid)
self.assertNotIn(trait, rp_traits)
# Restart the compute service which should sync and set the trait on
# the provider in placement.
self.restart_compute_service(compute)
rp_traits = self._get_provider_traits(rp_uuid)
self.assertIn(trait, rp_traits)
class ComputeStatusFilterTest211(ComputeStatusFilterTest):
"""Extends ComputeStatusFilterTest and uses the 2.11 API for the
legacy os-services disable/enable/force-down API behavior
"""
microversion = '2.11'
def _update_service(self, service, disabled, forced_down=None):
"""Update the service using the 2.11 request schema.
:param service: dict representing the service resource in the API
:param disabled: True if the service should be disabled, False if the
service should be enabled
:param forced_down: Optionally change the forced_down value.
"""
# Before 2.53 the service is uniquely identified by host and binary.
body = {
'host': service['host'],
'binary': service['binary']
}
# Handle forced_down first if provided since the enable/disable
# behavior in the API depends on it.
if forced_down is not None:
body['forced_down'] = forced_down
self.admin_api.api_put('/os-services/force-down', body)
if disabled:
self.admin_api.api_put('/os-services/disable', body)
else:
self.admin_api.api_put('/os-services/enable', body)
def _get_provider_uuid_by_host(self, host):
# We have to temporarily mutate to 2.53 to get the hypervisor UUID.
with utils.temporary_mutation(self.admin_api, microversion='2.53'):
return super(ComputeStatusFilterTest211,
self)._get_provider_uuid_by_host(host)<|fim▁end|> | host1_rp_uuid = self._get_provider_uuid_by_host('host1')
flavors = self.api.get_flavors()
flavor1 = flavors[0]
flavor2 = flavors[1] |
<|file_name|>ogone.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from hashlib import sha1
import logging
from lxml import etree, objectify
from pprint import pformat
import time
from urllib import urlencode
import urllib2
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.addons.payment_ogone.data import ogone
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(osv.Model):
_inherit = 'payment.acquirer'
def _get_ogone_urls(self, cr, uid, environment, context=None):
""" Ogone URLS:
- standard order: POST address for form-based
@TDETODO: complete me
"""
return {
'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
}
def _get_providers(self, cr, uid, context=None):
providers = super(PaymentAcquirerOgone, self)._get_providers(cr, uid, context=context)
providers.append(['ogone', 'Ogone'])
return providers
_columns = {
'ogone_pspid': fields.char('PSPID', required_if_provider='ogone'),
'ogone_userid': fields.char('API User ID', required_if_provider='ogone'),
'ogone_password': fields.char('API User Password', required_if_provider='ogone'),
'ogone_shakey_in': fields.char('SHA Key IN', size=32, required_if_provider='ogone'),
'ogone_shakey_out': fields.char('SHA Key OUT', size=32, required_if_provider='ogone'),
}
def _ogone_generate_shasign(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
                                have a SHA key IN and a SHA key OUT configured
:param string inout: 'in' (openerp contacting ogone) or 'out' (ogone
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'ogone'
key = getattr(acquirer, 'ogone_shakey_' + inout)
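        # SHA-IN covers every non-empty parameter we send to Ogone; SHA-OUT (Ogone
        # calling us back) only covers the fixed field list below, so filter_key is
        # a no-op for 'in' and a whitelist check for 'out'.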
def filter_key(key):
if inout == 'in':
return True
else:
# SHA-OUT keys
# source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
keys = [
'AAVADDRESS',
'AAVCHECK',
'AAVMAIL',
'AAVNAME',
'AAVPHONE',
'AAVZIP',
'ACCEPTANCE',
'ALIAS',
'AMOUNT',
'BIC',
'BIN',
'BRAND',<|fim▁hole|> 'CN',
'COMPLUS',
'CREATION_STATUS',
'CURRENCY',
'CVCCHECK',
'DCC_COMMPERCENTAGE',
'DCC_CONVAMOUNT',
'DCC_CONVCCY',
'DCC_EXCHRATE',
'DCC_EXCHRATESOURCE',
'DCC_EXCHRATETS',
'DCC_INDICATOR',
'DCC_MARGINPERCENTAGE',
'DCC_VALIDHOURS',
'DIGESTCARDNO',
'ECI',
'ED',
'ENCCARDNO',
'FXAMOUNT',
'FXCURRENCY',
'IBAN',
'IP',
'IPCTY',
'NBREMAILUSAGE',
'NBRIPUSAGE',
'NBRIPUSAGE_ALLTX',
'NBRUSAGE',
'NCERROR',
'NCERRORCARDNO',
'NCERRORCN',
'NCERRORCVC',
'NCERRORED',
'ORDERID',
'PAYID',
'PM',
'SCO_CATEGORY',
'SCORING',
'STATUS',
'SUBBRAND',
'SUBSCRIPTION_ID',
'TRXDATE',
'VC'
]
return key.upper() in keys
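        # Build the digest as described in Ogone's e-Commerce guide: sort the
        # parameters alphabetically, concatenate them as KEY=value<passphrase>
        # pairs and hash the whole string with SHA-1, e.g. (hypothetical values)
        # 'AMOUNT=1000secretCURRENCY=EURsecret' -> sha1(...).hexdigest().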
items = sorted((k.upper(), v) for k, v in values.items())
sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
sign = sign.encode("utf-8")
shasign = sha1(sign).hexdigest()
return shasign
def ogone_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
ogone_tx_values = dict(tx_values)
temp_ogone_tx_values = {
'PSPID': acquirer.ogone_pspid,
'ORDERID': tx_values['reference'],
'AMOUNT': '%d' % int(float_round(tx_values['amount'], 2) * 100),
'CURRENCY': tx_values['currency'] and tx_values['currency'].name or '',
'LANGUAGE': partner_values['lang'],
'CN': partner_values['name'],
'EMAIL': partner_values['email'],
'OWNERZIP': partner_values['zip'],
'OWNERADDRESS': partner_values['address'],
'OWNERTOWN': partner_values['city'],
'OWNERCTY': partner_values['country'] and partner_values['country'].name or '',
'OWNERTELNO': partner_values['phone'],
'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
}
if ogone_tx_values.get('return_url'):
temp_ogone_tx_values['PARAMPLUS'] = 'return_url=%s' % ogone_tx_values.pop('return_url')
shasign = self._ogone_generate_shasign(acquirer, 'in', temp_ogone_tx_values)
temp_ogone_tx_values['SHASIGN'] = shasign
ogone_tx_values.update(temp_ogone_tx_values)
return partner_values, ogone_tx_values
def ogone_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_ogone_urls(cr, uid, acquirer.environment, context=context)['ogone_standard_order_url']
class PaymentTxOgone(osv.Model):
_inherit = 'payment.transaction'
# ogone status
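    # Per Ogone's status-code table: 5/9 are final success codes (authorized /
    # payment requested), 1 is a customer cancellation, 46 means a 3-D Secure
    # challenge still has to be completed, and the wait list holds the
    # intermediate "pending/uncertain" codes.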
_ogone_valid_tx_status = [5, 9]
_ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
_ogone_pending_tx_status = [46] # 3DS HTML response
_ogone_cancel_tx_status = [1]
_columns = {
'ogone_3ds': fields.boolean('3DS Activated'),
'ogone_3ds_html': fields.html('3DS HTML'),
'ogone_complus': fields.char('Complus'),
'ogone_payid': fields.char('PayID', help='Payment ID, generated by Ogone')
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _ogone_form_get_tx_from_data(self, cr, uid, data, context=None):
""" Given a data dict coming from ogone, verify it and find the related
transaction record. """
reference, pay_id, shasign = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN')
if not reference or not pay_id or not shasign:
            error_msg = 'Ogone: received data with missing reference (%s) or pay_id (%s) or shasign (%s)' % (reference, pay_id, shasign)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use paytid ?
tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Ogone: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'out', data)
if shasign_check.upper() != shasign.upper():
error_msg = 'Ogone: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
_logger.error(error_msg)
raise ValidationError(error_msg)
return tx
def _ogone_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('PAYID') != tx.acquirer_reference:
invalid_parameters.append(('PAYID', data.get('PAYID'), tx.acquirer_reference))
# check what is bought
if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
if data.get('currency') != tx.currency_id.name:
invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))
return invalid_parameters
def _ogone_form_validate(self, cr, uid, tx, data, context=None):
if tx.state == 'done':
_logger.warning('Ogone: trying to validate an already validated tx (ref %s)' % tx.reference)
return True
status = int(data.get('STATUS', '0'))
if status in self._ogone_valid_tx_status:
tx.write({
'state': 'done',
'date_validate': data['TRXDATE'],
'acquirer_reference': data['PAYID'],
})
return True
elif status in self._ogone_cancel_tx_status:
tx.write({
'state': 'cancel',
'acquirer_reference': data.get('PAYID'),
})
elif status in self._ogone_pending_tx_status:
tx.write({
'state': 'pending',
'acquirer_reference': data.get('PAYID'),
})
else:
error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
'error_str': data.get('NCERROR'),
'error_code': data.get('NCERRORPLUS'),
'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERRORPLUS')),
}
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'acquirer_reference': data.get('PAYID'),
})
return False
# --------------------------------------------------
# S2S RELATED METHODS
# --------------------------------------------------
def ogone_s2s_create_alias(self, cr, uid, id, values, context=None):
""" Create an alias at Ogone via batch.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx = self.browse(cr, uid, id, context=context)
assert tx.type == 'server2server', 'Calling s2s dedicated method for a %s acquirer' % tx.type
alias = 'OPENERP-%d-%d' % (tx.partner_id.id, tx.id)
expiry_date = '%s%s' % (values['expiry_date_mm'], values['expiry_date_yy'][2:])
line = 'ADDALIAS;%(alias)s;%(holder_name)s;%(number)s;%(expiry_date)s;%(brand)s;%(pspid)s'
line = line % dict(values, alias=alias, expiry_date=expiry_date, pspid=tx.acquirer_id.ogone_pspid)
tx_data = {
'FILE_REFERENCE': 'OPENERP-NEW-ALIAS-%s' % time.time(), # something unique,
'TRANSACTION_CODE': 'ATR',
'OPERATION': 'SAL',
'NB_PAYMENTS': 1, # even if we do not actually have any payment, ogone want it to not be 0
'FILE': line,
'REPLY_TYPE': 'XML',
'PSPID': tx.acquirer_id.ogone_pspid,
'USERID': tx.acquirer_id.ogone_userid,
'PSWD': tx.acquirer_id.ogone_password,
'PROCESS_MODE': 'CHECKANDPROCESS',
}
# TODO: fix URL computation
request = urllib2.Request(tx.acquirer_id.ogone_afu_agree_url, urlencode(tx_data))
result = urllib2.urlopen(request).read()
try:
tree = objectify.fromstring(result)
except etree.XMLSyntaxError:
_logger.exception('Invalid xml response from ogone')
return None
error_code = error_str = None
if hasattr(tree, 'PARAMS_ERROR'):
error_code = tree.NCERROR.text
error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
else:
node = tree.FORMAT_CHECK
error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
if error_node is not None:
error_code = error_node.NCERROR.text
error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)
if error_code:
error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
_logger.error(error)
raise Exception(error) # TODO specific exception
tx.write({'partner_reference': alias})
return True
def ogone_s2s_generate_values(self, cr, uid, id, custom_values, context=None):
""" Generate valid Ogone values for a s2s tx.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx = self.browse(cr, uid, id, context=context)
tx_data = {
'PSPID': tx.acquirer_id.ogone_pspid,
'USERID': tx.acquirer_id.ogone_userid,
'PSWD': tx.acquirer_id.ogone_password,
'OrderID': tx.reference,
'amount': '%d' % int(float_round(tx.amount, 2) * 100), # tde check amount or str * 100 ?
'CURRENCY': tx.currency_id.name,
'LANGUAGE': tx.partner_lang,
'OPERATION': 'SAL',
'ECI': 2, # Recurring (from MOTO)
'ALIAS': tx.partner_reference,
'RTIMEOUT': 30,
}
if custom_values.get('ogone_cvc'):
tx_data['CVC'] = custom_values.get('ogone_cvc')
if custom_values.pop('ogone_3ds', None):
tx_data.update({
'FLAG3D': 'Y', # YEAH!!
})
if custom_values.get('ogone_complus'):
tx_data['COMPLUS'] = custom_values.get('ogone_complus')
if custom_values.get('ogone_accept_url'):
pass
shasign = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'in', tx_data)
tx_data['SHASIGN'] = shasign
return tx_data
def ogone_s2s_feedback(self, cr, uid, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
pass
def ogone_s2s_execute(self, cr, uid, id, values, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx = self.browse(cr, uid, id, context=context)
tx_data = self.ogone_s2s_generate_values(cr, uid, id, values, context=context)
_logger.info('Generated Ogone s2s data %s', pformat(tx_data)) # debug
request = urllib2.Request(tx.acquirer_id.ogone_direct_order_url, urlencode(tx_data))
result = urllib2.urlopen(request).read()
_logger.info('Contacted Ogone direct order; result %s', result) # debug
tree = objectify.fromstring(result)
payid = tree.get('PAYID')
query_direct_data = dict(
PSPID=tx.acquirer_id.ogone_pspid,
USERID=tx.acquirer_id.ogone_userid,
PSWD=tx.acquirer_id.ogone_password,
ID=payid,
)
query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (tx.acquirer_id.environment,)
tries = 2
tx_done = False
tx_status = False
        while not tx_done and tries > 0:
try:
tree = objectify.fromstring(result)
except etree.XMLSyntaxError:
# invalid response from ogone
_logger.exception('Invalid xml response from ogone')
raise
# see https://secure.ogone.com/ncol/paymentinfos1.asp
VALID_TX = [5, 9]
WAIT_TX = [41, 50, 51, 52, 55, 56, 91, 92, 99]
PENDING_TX = [46] # 3DS HTML response
# other status are errors...
status = tree.get('STATUS')
if status == '':
status = None
else:
status = int(status)
if status in VALID_TX:
tx_status = True
tx_done = True
elif status in PENDING_TX:
html = str(tree.HTML_ANSWER)
tx_data.update(ogone_3ds_html=html.decode('base64'))
tx_status = False
tx_done = True
elif status in WAIT_TX:
                time.sleep(1.5)  # time.sleep() takes seconds; back off briefly before polling again
request = urllib2.Request(query_direct_url, urlencode(query_direct_data))
result = urllib2.urlopen(request).read()
_logger.debug('Contacted Ogone query direct; result %s', result)
else:
error_code = tree.get('NCERROR')
if not ogone.retryable(error_code):
error_str = tree.get('NCERRORPLUS')
error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
error = 'ERROR: %s\n\n%s: %s' % (error_str, error_code, error_msg)
_logger.info(error)
raise Exception(error)
tries = tries - 1
if not tx_done and tries == 0:
raise Exception('Cannot get transaction status...')
return tx_status<|fim▁end|> | 'CARDNO',
'CCCTY', |
<|file_name|>expression.js<|end_file_name|><|fim▁begin|>import {LooseParser} from "./state"
import {isDummy} from "./parseutil"
import {tokTypes as tt} from ".."
const lp = LooseParser.prototype
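// In this error-tolerant parser an invalid assignment target is replaced by a
// dummy identifier instead of raising a SyntaxError.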
lp.checkLVal = function(expr) {
if (!expr) return expr
switch (expr.type) {
case "Identifier":
case "MemberExpression":
return expr
case "ParenthesizedExpression":
expr.expression = this.checkLVal(expr.expression)
return expr
default:
return this.dummyIdent()
}
}
lp.parseExpression = function(noIn) {
let start = this.storeCurrentPos()
let expr = this.parseMaybeAssign(noIn)
if (this.tok.type === tt.comma) {
let node = this.startNodeAt(start)
node.expressions = [expr]
while (this.eat(tt.comma)) node.expressions.push(this.parseMaybeAssign(noIn))
return this.finishNode(node, "SequenceExpression")
}
return expr
}
lp.parseParenExpression = function() {
this.pushCx()
this.expect(tt.parenL)
let val = this.parseExpression()
this.popCx()
this.expect(tt.parenR)
return val
}
lp.parseMaybeAssign = function(noIn) {
if (this.toks.isContextual("yield")) {
let node = this.startNode()
this.next()
if (this.semicolon() || this.canInsertSemicolon() || (this.tok.type != tt.star && !this.tok.type.startsExpr)) {
node.delegate = false
node.argument = null
} else {
node.delegate = this.eat(tt.star)
node.argument = this.parseMaybeAssign()
}
return this.finishNode(node, "YieldExpression")
}
let start = this.storeCurrentPos()
let left = this.parseMaybeConditional(noIn)
if (this.tok.type.isAssign) {
let node = this.startNodeAt(start)
node.operator = this.tok.value
node.left = this.tok.type === tt.eq ? this.toAssignable(left) : this.checkLVal(left)
this.next()
node.right = this.parseMaybeAssign(noIn)
return this.finishNode(node, "AssignmentExpression")
}
return left
}
lp.parseMaybeConditional = function(noIn) {
let start = this.storeCurrentPos()
let expr = this.parseExprOps(noIn)
if (this.eat(tt.question)) {
let node = this.startNodeAt(start)
node.test = expr
node.consequent = this.parseMaybeAssign()
node.alternate = this.expect(tt.colon) ? this.parseMaybeAssign(noIn) : this.dummyIdent()
return this.finishNode(node, "ConditionalExpression")
}
return expr
}
lp.parseExprOps = function(noIn) {
let start = this.storeCurrentPos()
let indent = this.curIndent, line = this.curLineStart
return this.parseExprOp(this.parseMaybeUnary(false), start, -1, noIn, indent, line)
}
lp.parseExprOp = function(left, start, minPrec, noIn, indent, line) {
if (this.curLineStart != line && this.curIndent < indent && this.tokenStartsLine()) return left
let prec = this.tok.type.binop
if (prec != null && (!noIn || this.tok.type !== tt._in)) {
if (prec > minPrec) {
let node = this.startNodeAt(start)
node.left = left
node.operator = this.tok.value
this.next()
if (this.curLineStart != line && this.curIndent < indent && this.tokenStartsLine()) {
node.right = this.dummyIdent()
} else {
let rightStart = this.storeCurrentPos()
node.right = this.parseExprOp(this.parseMaybeUnary(false), rightStart, prec, noIn, indent, line)
}
this.finishNode(node, /&&|\|\|/.test(node.operator) ? "LogicalExpression" : "BinaryExpression")
return this.parseExprOp(node, start, minPrec, noIn, indent, line)
}
}
return left
}
lp.parseMaybeUnary = function(sawUnary) {
let start = this.storeCurrentPos(), expr
if (this.tok.type.prefix) {
let node = this.startNode(), update = this.tok.type === tt.incDec
if (!update) sawUnary = true
node.operator = this.tok.value
node.prefix = true
this.next()
node.argument = this.parseMaybeUnary(true)
if (update) node.argument = this.checkLVal(node.argument)
expr = this.finishNode(node, update ? "UpdateExpression" : "UnaryExpression")
} else if (this.tok.type === tt.ellipsis) {
let node = this.startNode()
this.next()
node.argument = this.parseMaybeUnary(sawUnary)
expr = this.finishNode(node, "SpreadElement")
} else {
expr = this.parseExprSubscripts()
while (this.tok.type.postfix && !this.canInsertSemicolon()) {
let node = this.startNodeAt(start)
node.operator = this.tok.value
node.prefix = false
node.argument = this.checkLVal(expr)
this.next()
expr = this.finishNode(node, "UpdateExpression")
}
}
if (!sawUnary && this.eat(tt.starstar)) {
let node = this.startNodeAt(start)
node.operator = "**"
node.left = expr
node.right = this.parseMaybeUnary(false)
return this.finishNode(node, "BinaryExpression")
}
return expr
}
lp.parseExprSubscripts = function() {
let start = this.storeCurrentPos()
return this.parseSubscripts(this.parseExprAtom(), start, false, this.curIndent, this.curLineStart)
}
lp.parseSubscripts = function(base, start, noCalls, startIndent, line) {
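  // Keep consuming `.prop`, `[expr]`, call arguments and tagged templates; the
  // indentation/line checks let the loose parser stop when a new line no longer
  // looks like part of the same expression.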
for (;;) {
if (this.curLineStart != line && this.curIndent <= startIndent && this.tokenStartsLine()) {
if (this.tok.type == tt.dot && this.curIndent == startIndent)
--startIndent
else
return base
}
if (this.eat(tt.dot)) {
let node = this.startNodeAt(start)
node.object = base
if (this.curLineStart != line && this.curIndent <= startIndent && this.tokenStartsLine())
node.property = this.dummyIdent()
else
node.property = this.parsePropertyAccessor() || this.dummyIdent()
node.computed = false
base = this.finishNode(node, "MemberExpression")
} else if (this.tok.type == tt.bracketL) {
this.pushCx()
this.next()
let node = this.startNodeAt(start)
node.object = base
node.property = this.parseExpression()
node.computed = true
this.popCx()
this.expect(tt.bracketR)
base = this.finishNode(node, "MemberExpression")
} else if (!noCalls && this.tok.type == tt.parenL) {
let node = this.startNodeAt(start)
node.callee = base
node.arguments = this.parseExprList(tt.parenR)
base = this.finishNode(node, "CallExpression")
} else if (this.tok.type == tt.backQuote) {
let node = this.startNodeAt(start)
node.tag = base
node.quasi = this.parseTemplate()
base = this.finishNode(node, "TaggedTemplateExpression")
} else {
return base
}
}
}
lp.parseExprAtom = function() {
let node
switch (this.tok.type) {
case tt._this:
case tt._super:
let type = this.tok.type === tt._this ? "ThisExpression" : "Super"
node = this.startNode()
this.next()
return this.finishNode(node, type)
case tt.name:
let start = this.storeCurrentPos()
let id = this.parseIdent()
return this.eat(tt.arrow) ? this.parseArrowExpression(this.startNodeAt(start), [id]) : id
case tt.regexp:
node = this.startNode()
let val = this.tok.value
node.regex = {pattern: val.pattern, flags: val.flags}
node.value = val.value
node.raw = this.input.slice(this.tok.start, this.tok.end)
this.next()
return this.finishNode(node, "Literal")
case tt.num: case tt.string:
node = this.startNode()
node.value = this.tok.value
node.raw = this.input.slice(this.tok.start, this.tok.end)
this.next()
return this.finishNode(node, "Literal")
case tt._null: case tt._true: case tt._false:
node = this.startNode()
node.value = this.tok.type === tt._null ? null : this.tok.type === tt._true
node.raw = this.tok.type.keyword
this.next()
return this.finishNode(node, "Literal")
case tt.parenL:
let parenStart = this.storeCurrentPos()
this.next()
let inner = this.parseExpression()
this.expect(tt.parenR)
if (this.eat(tt.arrow)) {
return this.parseArrowExpression(this.startNodeAt(parenStart), inner.expressions || (isDummy(inner) ? [] : [inner]))
}
if (this.options.preserveParens) {
let par = this.startNodeAt(parenStart)
par.expression = inner
inner = this.finishNode(par, "ParenthesizedExpression")
}
return inner
case tt.bracketL:
node = this.startNode()
node.elements = this.parseExprList(tt.bracketR, true)
return this.finishNode(node, "ArrayExpression")
case tt.braceL:
return this.parseObj()
case tt._class:
return this.parseClass()
case tt._function:
node = this.startNode()
this.next()
return this.parseFunction(node, false)
case tt._new:
return this.parseNew()
case tt.backQuote:
return this.parseTemplate()
default:
return this.dummyIdent()
}
}
lp.parseNew = function() {
let node = this.startNode(), startIndent = this.curIndent, line = this.curLineStart
let meta = this.parseIdent(true)
if (this.options.ecmaVersion >= 6 && this.eat(tt.dot)) {
node.meta = meta
node.property = this.parseIdent(true)
return this.finishNode(node, "MetaProperty")
}
let start = this.storeCurrentPos()
node.callee = this.parseSubscripts(this.parseExprAtom(), start, true, startIndent, line)
if (this.tok.type == tt.parenL) {
node.arguments = this.parseExprList(tt.parenR)
} else {
node.arguments = []
}
return this.finishNode(node, "NewExpression")
}
lp.parseTemplateElement = function() {
let elem = this.startNode()
elem.value = {
raw: this.input.slice(this.tok.start, this.tok.end).replace(/\r\n?/g, '\n'),
cooked: this.tok.value
}
this.next()
elem.tail = this.tok.type === tt.backQuote
return this.finishNode(elem, "TemplateElement")
}
lp.parseTemplate = function() {
let node = this.startNode()
this.next()
node.expressions = []
let curElt = this.parseTemplateElement()
node.quasis = [curElt]
while (!curElt.tail) {
this.next()
node.expressions.push(this.parseExpression())
if (this.expect(tt.braceR)) {
curElt = this.parseTemplateElement()
} else {
curElt = this.startNode()
curElt.value = {cooked: '', raw: ''}
curElt.tail = true
}
node.quasis.push(curElt)
}
this.expect(tt.backQuote)
return this.finishNode(node, "TemplateLiteral")
}
lp.parseObj = function() {
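  // Object literals are parsed tolerantly: dummy keys are skipped and a missing
  // closing brace simply ends the node at the next token.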
let node = this.startNode()
node.properties = []
this.pushCx()
let indent = this.curIndent + 1, line = this.curLineStart
this.eat(tt.braceL)
if (this.curIndent + 1 < indent) { indent = this.curIndent; line = this.curLineStart }
while (!this.closes(tt.braceR, indent, line)) {
let prop = this.startNode(), isGenerator, start
if (this.options.ecmaVersion >= 6) {
start = this.storeCurrentPos()
prop.method = false
prop.shorthand = false
isGenerator = this.eat(tt.star)
}
this.parsePropertyName(prop)
if (isDummy(prop.key)) { if (isDummy(this.parseMaybeAssign())) this.next(); this.eat(tt.comma); continue }
if (this.eat(tt.colon)) {
prop.kind = "init"
prop.value = this.parseMaybeAssign()
} else if (this.options.ecmaVersion >= 6 && (this.tok.type === tt.parenL || this.tok.type === tt.braceL)) {
prop.kind = "init"
prop.method = true
prop.value = this.parseMethod(isGenerator)
} else if (this.options.ecmaVersion >= 5 && prop.key.type === "Identifier" &&
!prop.computed && (prop.key.name === "get" || prop.key.name === "set") &&
(this.tok.type != tt.comma && this.tok.type != tt.braceR)) {
prop.kind = prop.key.name
this.parsePropertyName(prop)
prop.value = this.parseMethod(false)
} else {
prop.kind = "init"
if (this.options.ecmaVersion >= 6) {
if (this.eat(tt.eq)) {
let assign = this.startNodeAt(start)
assign.operator = "="
assign.left = prop.key
assign.right = this.parseMaybeAssign()
prop.value = this.finishNode(assign, "AssignmentExpression")
} else {
prop.value = prop.key
}
} else {
prop.value = this.dummyIdent()
}
prop.shorthand = true
}
node.properties.push(this.finishNode(prop, "Property"))
this.eat(tt.comma)
}
this.popCx()
if (!this.eat(tt.braceR)) {
// If there is no closing brace, make the node span to the start
// of the next token (this is useful for Tern)
this.last.end = this.tok.start
if (this.options.locations) this.last.loc.end = this.tok.loc.start
}
return this.finishNode(node, "ObjectExpression")
}
lp.parsePropertyName = function(prop) {
if (this.options.ecmaVersion >= 6) {
if (this.eat(tt.bracketL)) {
prop.computed = true
prop.key = this.parseExpression()
this.expect(tt.bracketR)
return
} else {
prop.computed = false
}
}
let key = (this.tok.type === tt.num || this.tok.type === tt.string) ? this.parseExprAtom() : this.parseIdent()
prop.key = key || this.dummyIdent()
}
lp.parsePropertyAccessor = function() {
if (this.tok.type === tt.name || this.tok.type.keyword) return this.parseIdent()
}
lp.parseIdent = function() {
let name = this.tok.type === tt.name ? this.tok.value : this.tok.type.keyword
if (!name) return this.dummyIdent()
let node = this.startNode()
this.next()
node.name = name
return this.finishNode(node, "Identifier")
}
lp.initFunction = function(node) {
node.id = null
node.params = []
if (this.options.ecmaVersion >= 6) {
node.generator = false
node.expression = false
}
}
// Convert existing expression atom to assignable pattern
// if possible.
lp.toAssignable = function(node, binding) {
if (!node || node.type == "Identifier" || (node.type == "MemberExpression" && !binding)) {
// Okay
} else if (node.type == "ParenthesizedExpression") {
node.expression = this.toAssignable(node.expression, binding)
} else if (this.options.ecmaVersion < 6) {
return this.dummyIdent()
} else if (node.type == "ObjectExpression") {
node.type = "ObjectPattern"
let props = node.properties
for (let i = 0; i < props.length; i++)
props[i].value = this.toAssignable(props[i].value, binding)
} else if (node.type == "ArrayExpression") {
node.type = "ArrayPattern"
this.toAssignableList(node.elements, binding)
} else if (node.type == "SpreadElement") {
node.type = "RestElement"
node.argument = this.toAssignable(node.argument, binding)
} else if (node.type == "AssignmentExpression") {
node.type = "AssignmentPattern"
delete node.operator
} else {
return this.dummyIdent()
}
return node
}
<|fim▁hole|>}
lp.parseFunctionParams = function(params) {
params = this.parseExprList(tt.parenR)
return this.toAssignableList(params, true)
}
lp.parseMethod = function(isGenerator) {
let node = this.startNode()
this.initFunction(node)
node.params = this.parseFunctionParams()
node.generator = isGenerator || false
node.expression = this.options.ecmaVersion >= 6 && this.tok.type !== tt.braceL
node.body = node.expression ? this.parseMaybeAssign() : this.parseBlock()
return this.finishNode(node, "FunctionExpression")
}
lp.parseArrowExpression = function(node, params) {
this.initFunction(node)
node.params = this.toAssignableList(params, true)
node.expression = this.tok.type !== tt.braceL
node.body = node.expression ? this.parseMaybeAssign() : this.parseBlock()
return this.finishNode(node, "ArrowFunctionExpression")
}
lp.parseExprList = function(close, allowEmpty) {
this.pushCx()
let indent = this.curIndent, line = this.curLineStart, elts = []
this.next() // Opening bracket
while (!this.closes(close, indent + 1, line)) {
if (this.eat(tt.comma)) {
elts.push(allowEmpty ? null : this.dummyIdent())
continue
}
let elt = this.parseMaybeAssign()
if (isDummy(elt)) {
if (this.closes(close, indent, line)) break
this.next()
} else {
elts.push(elt)
}
this.eat(tt.comma)
}
this.popCx()
if (!this.eat(close)) {
// If there is no closing brace, make the node span to the start
// of the next token (this is useful for Tern)
this.last.end = this.tok.start
if (this.options.locations) this.last.loc.end = this.tok.loc.start
}
return elts
}<|fim▁end|> | lp.toAssignableList = function(exprList, binding) {
for (let i = 0; i < exprList.length; i++)
exprList[i] = this.toAssignable(exprList[i], binding)
return exprList |
<|file_name|>test_inotify.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
import unittest
import inotify.constants
import inotify.calls
import inotify.adapters
import inotify.test_support
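# Probe for the Python 2 `unicode` builtin so each international-naming test
# runs only on the interpreter it applies to.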
try:
unicode
except NameError:
_HAS_PYTHON2_UNICODE_SUPPORT = False
else:
_HAS_PYTHON2_UNICODE_SUPPORT = True
class TestInotify(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotify, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is True, "Not in Python 3")
def test__international_naming_python3(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(path, '新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, 'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, 'filename'),
]
self.assertEquals(events, expected)
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is False, "Not in Python 2")
def test__international_naming_python2(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(unicode(path), u'新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, u'filename料夾'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, u'filename料夾'),
]
self.assertEquals(events, expected)
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.Inotify()
i.add_watch(path1)
with open('ignored_new_file', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file'), 'w'):
pass
with open(os.path.join(path2, 'ignored_new_file'), 'w'):
pass
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16),
['IN_CREATE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16),
['IN_OPEN'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16),
['IN_CLOSE_WRITE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16),
['IN_DELETE'],
path1,
'seen_new_file'
)
]
self.assertEquals(events, expected)
# This can't be removed until *after* we've read the events because
# they'll be flushed the moment we remove the watch.
i.remove_watch(path1)
with open(os.path.join(path1, 'ignored_after_removal'), 'w'):
pass
events = self.__read_all_events(i)
self.assertEquals(events, [])
@staticmethod
def _open_write_close(*args):
with open(os.path.join(*args), 'w'):
pass
@staticmethod
def _make_temp_path(*args):
path = os.path.join(*args)
os.mkdir(path)
return path
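    # The helpers below build the expected (event header, type names, watch path,
    # filename) tuples that Inotify.event_gen() yields.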
@staticmethod
def _event_general(wd, mask, type_name, path, filename):
return ((inotify.adapters._INOTIFY_EVENT(wd=wd, mask=mask, cookie=0, len=16)),
[type_name],
path,
filename)
@staticmethod
def _event_create(wd, path, filename):
return TestInotify._event_general(wd, 256, 'IN_CREATE', path, filename)
@staticmethod
def _event_open(wd, path, filename):
return TestInotify._event_general(wd, 32, 'IN_OPEN', path, filename)
@staticmethod<|fim▁hole|> def _event_close_write(wd, path, filename):
return TestInotify._event_general(wd, 8, 'IN_CLOSE_WRITE', path, filename)
def test__watch_list_of_paths(self):
with inotify.test_support.temp_path() as path:
path1 = TestInotify._make_temp_path(path, 'aa')
path2 = TestInotify._make_temp_path(path, 'bb')
i = inotify.adapters.Inotify([path1, path2])
TestInotify._open_write_close('ignored_new_file')
TestInotify._open_write_close(path1, 'seen_new_file')
TestInotify._open_write_close(path2, 'seen_new_file2')
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
TestInotify._event_create(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_open(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_close_write(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_create(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_open(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_close_write(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_general(wd=1, mask=512, type_name='IN_DELETE',
path=path1, filename='seen_new_file')
]
self.assertEquals(events, expected)
def test__error_on_watch_nonexistent_folder(self):
i = inotify.adapters.Inotify()
with self.assertRaises(inotify.calls.InotifyError):
i.add_watch('/dev/null/foo')
def test__get_event_names(self):
all_mask = 0
for bit in inotify.constants.MASK_LOOKUP.keys():
all_mask |= bit
all_names = inotify.constants.MASK_LOOKUP.values()
all_names = list(all_names)
i = inotify.adapters.Inotify()
names = i._get_event_names(all_mask)
self.assertEquals(names, all_names)
class TestInotifyTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTree, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open('seen_new_file1', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file2'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file3'), 'w'):
pass
os.remove(os.path.join(path, 'seen_new_file1'))
os.remove(os.path.join(path1, 'seen_new_file2'))
os.remove(os.path.join(path2, 'seen_new_file3'))
os.rmdir(path1)
os.rmdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16), ['IN_DELETE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=512, cookie=0, len=16), ['IN_DELETE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'aa'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'bb'),
]
self.assertEquals(events, expected)
def test__renames(self):
# Since we're not reading the events one at a time in a loop and
# removing or renaming folders will flush any queued events, we have to
# group things in order to check things first before such operations.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
old_path = os.path.join(path, 'old_folder')
new_path = os.path.join(path, 'new_folder')
os.mkdir(old_path)
events1 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=events1[0][0].cookie, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'old_folder'),
]
self.assertEquals(events1, expected)
os.rename(old_path, new_path)
events2 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741888, cookie=events2[0][0].cookie, len=16), ['IN_MOVED_FROM', 'IN_ISDIR'], path, 'old_folder'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741952, cookie=events2[1][0].cookie, len=16), ['IN_MOVED_TO', 'IN_ISDIR'], path, 'new_folder'),
]
self.assertEquals(events2, expected)
with open(os.path.join(new_path, 'old_filename'), 'w'):
pass
os.rename(
os.path.join(new_path, 'old_filename'),
os.path.join(new_path, 'new_filename'))
os.remove(os.path.join('new_folder', 'new_filename'))
os.rmdir('new_folder')
events3 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=64, cookie=events3[3][0].cookie, len=16), ['IN_MOVED_FROM'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=128, cookie=events3[4][0].cookie, len=16), ['IN_MOVED_TO'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'new_folder'),
]
self.assertEquals(events3, expected)
def test__automatic_new_watches_on_new_paths(self):
# Tests that watches are actively established as new folders are
# created.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'folder1'),
]
self.assertEquals(events, expected)
os.mkdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path1, 'folder2'),
]
self.assertEquals(events, expected)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEquals(events, expected)
def test__automatic_new_watches_on_existing_paths(self):
# Tests whether the watches are recursively established when we
# initialize.
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEquals(events, expected)
class TestInotifyTrees(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTrees, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTrees([path1, path2])
with open(os.path.join(path1, 'seen_new_file1'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file2'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file2'),
]
self.assertEquals(events, expected)<|fim▁end|> | |
<|file_name|>node_auth.go<|end_file_name|><|fim▁begin|>package kubernetes
import (
"crypto/x509"
"net/http"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/auth/authenticator"
kauthorizer "k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/auth/user"
unversionedauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned"
oauthenticator "github.com/openshift/origin/pkg/auth/authenticator"<|fim▁hole|> "github.com/openshift/origin/pkg/auth/authenticator/anonymous"
"github.com/openshift/origin/pkg/auth/authenticator/request/bearertoken"
"github.com/openshift/origin/pkg/auth/authenticator/request/unionrequest"
"github.com/openshift/origin/pkg/auth/authenticator/request/x509request"
authncache "github.com/openshift/origin/pkg/auth/authenticator/token/cache"
authnremote "github.com/openshift/origin/pkg/auth/authenticator/token/remotetokenreview"
"github.com/openshift/origin/pkg/auth/group"
authorizationapi "github.com/openshift/origin/pkg/authorization/api"
oauthorizer "github.com/openshift/origin/pkg/authorization/authorizer"
authzadapter "github.com/openshift/origin/pkg/authorization/authorizer/adapter"
authzcache "github.com/openshift/origin/pkg/authorization/authorizer/cache"
authzremote "github.com/openshift/origin/pkg/authorization/authorizer/remote"
oclient "github.com/openshift/origin/pkg/client"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
)
func newAuthenticator(authenticationClient unversionedauthentication.TokenReviewsGetter, clientCAs *x509.CertPool, cacheTTL time.Duration, cacheSize int) (authenticator.Request, error) {
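	// Build the ordered list of request authenticators: bearer tokens are checked
	// against the master's TokenReview API (optionally cached) and client
	// certificates against the supplied CA pool; unauthenticated requests fall
	// back to system:anonymous below.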
authenticators := []oauthenticator.Request{}
// API token auth
var (
tokenAuthenticator oauthenticator.Token
err error
)
// Authenticate against the remote master
tokenAuthenticator, err = authnremote.NewAuthenticator(authenticationClient)
if err != nil {
return nil, err
}
// Cache results
if cacheTTL > 0 && cacheSize > 0 {
tokenAuthenticator, err = authncache.NewAuthenticator(tokenAuthenticator, cacheTTL, cacheSize)
if err != nil {
return nil, err
}
}
authenticators = append(authenticators, bearertoken.New(tokenAuthenticator, true))
// Client-cert auth
if clientCAs != nil {
opts := x509request.DefaultVerifyOptions()
opts.Roots = clientCAs
certauth := x509request.New(opts, x509request.SubjectToUserConversion)
authenticators = append(authenticators, certauth)
}
ret := &unionrequest.Authenticator{
// Anonymous requests will pass the token and cert checks without errors
// Bad tokens or bad certs will produce errors, in which case we should not continue to authenticate them as "system:anonymous"
FailOnError: true,
Handlers: []oauthenticator.Request{
// Add the "system:authenticated" group to users that pass token/cert authentication
group.NewGroupAdder(unionrequest.NewUnionAuthentication(authenticators...), []string{bootstrappolicy.AuthenticatedGroup}),
// Fall back to the "system:anonymous" user
anonymous.NewAuthenticator(),
},
}
return ret, nil
}
func newAuthorizerAttributesGetter(nodeName string) (kauthorizer.RequestAttributesGetter, error) {
return NodeAuthorizerAttributesGetter{nodeName}, nil
}
type NodeAuthorizerAttributesGetter struct {
nodeName string
}
func isSubpath(r *http.Request, path string) bool {
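// A request path matches when it equals the given path or is nested beneath it.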
path = strings.TrimSuffix(path, "/")
return r.URL.Path == path || strings.HasPrefix(r.URL.Path, path+"/")
}
// GetRequestAttributes populates authorizer attributes for the requests to the kubelet API.
// Default attributes are {apiVersion=v1,verb=proxy,resource=nodes,resourceName=<node name>}
// More specific verb/resource is set for the following request patterns:
// /stats/* => verb=<api verb from request>, resource=nodes/stats
// /metrics/* => verb=<api verb from request>, resource=nodes/metrics
// /logs/* => verb=<api verb from request>, resource=nodes/log
func (n NodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *http.Request) kauthorizer.Attributes {
// Default verb/resource is proxy nodes, which allows full access to the kubelet API
attrs := oauthorizer.DefaultAuthorizationAttributes{
APIVersion: "v1",
APIGroup: "",
Verb: "proxy",
Resource: "nodes",
ResourceName: n.nodeName,
URL: r.URL.Path,
}
namespace := ""
apiVerb := ""
switch r.Method {
case "POST":
apiVerb = "create"
case "GET":
apiVerb = "get"
case "PUT":
apiVerb = "update"
case "PATCH":
apiVerb = "patch"
case "DELETE":
apiVerb = "delete"
}
// Override verb/resource for specific paths
// Updates to these rules require updating NodeAdminRole and NodeReaderRole in bootstrap policy
switch {
case isSubpath(r, "/stats"):
attrs.Verb = apiVerb
attrs.Resource = authorizationapi.NodeStatsResource
case isSubpath(r, "/metrics"):
attrs.Verb = apiVerb
attrs.Resource = authorizationapi.NodeMetricsResource
case isSubpath(r, "/logs"):
attrs.Verb = apiVerb
attrs.Resource = authorizationapi.NodeLogResource
}
// TODO: handle other things like /healthz/*? not sure if "non-resource" urls on the kubelet make sense to authorize against master non-resource URL policy
glog.V(2).Infof("Node request attributes: namespace=%s, user=%#v, attrs=%#v", namespace, u, attrs)
return authzadapter.KubernetesAuthorizerAttributes(namespace, u, attrs)
}
func newAuthorizer(c *oclient.Client, cacheTTL time.Duration, cacheSize int) (kauthorizer.Authorizer, error) {
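// Authorization checks are delegated to the remote master, optionally cached, then adapted to the Kubernetes authorizer interface.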
var (
authz oauthorizer.Authorizer
err error
)
// Authorize against the remote master
authz, err = authzremote.NewAuthorizer(c)
if err != nil {
return nil, err
}
// Cache results
if cacheTTL > 0 && cacheSize > 0 {
authz, err = authzcache.NewAuthorizer(authz, cacheTTL, cacheSize)
if err != nil {
return nil, err
}
}
// Adapt to the Kubernetes authorizer interface
kauthz, err := authzadapter.NewAuthorizer(authz)
if err != nil {
return nil, err
}
return kauthz, nil
}<|fim▁end|> | |
<|file_name|>controller_test.go<|end_file_name|><|fim▁begin|>// black-box testing
package router_test
import (
"testing"
"github.com/kataras/iris"
"github.com/kataras/iris/context"
"github.com/kataras/iris/core/router"
"github.com/kataras/iris/httptest"
)
type testController struct {
router.Controller
}
var writeMethod = func(c router.Controller) {
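// Echo the request's HTTP verb in the response body so tests can assert which handler ran.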
c.Ctx.Writef(c.Ctx.Method())
}
func (c *testController) Get() {
writeMethod(c.Controller)
}
func (c *testController) Post() {
writeMethod(c.Controller)
}
func (c *testController) Put() {
writeMethod(c.Controller)
}
func (c *testController) Delete() {
writeMethod(c.Controller)
}
func (c *testController) Connect() {
writeMethod(c.Controller)
}
func (c *testController) Head() {
writeMethod(c.Controller)
}
func (c *testController) Patch() {
writeMethod(c.Controller)
}
func (c *testController) Options() {
writeMethod(c.Controller)
}
func (c *testController) Trace() {
writeMethod(c.Controller)
}
type (
testControllerAll struct{ router.Controller }
testControllerAny struct{ router.Controller } // exactly the same as All
)
func (c *testControllerAll) All() {
writeMethod(c.Controller)
}
func (c *testControllerAny) All() {
writeMethod(c.Controller)
}
func TestControllerMethodFuncs(t *testing.T) {
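// Every HTTP verb should reach the matching controller method, which echoes the verb back.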
app := iris.New()
app.Controller("/", new(testController))
app.Controller("/all", new(testControllerAll))
app.Controller("/any", new(testControllerAny))
e := httptest.New(t, app)
for _, method := range router.AllMethods {
e.Request(method, "/").Expect().Status(httptest.StatusOK).
Body().Equal(method)
e.Request(method, "/all").Expect().Status(httptest.StatusOK).
Body().Equal(method)
e.Request(method, "/any").Expect().Status(httptest.StatusOK).
Body().Equal(method)
}
}
type testControllerPersistence struct {<|fim▁hole|> Data string `iris:"persistence"`
}
func (t *testControllerPersistence) Get() {
t.Ctx.WriteString(t.Data)
}
func TestControllerPersistenceFields(t *testing.T) {
data := "this remains the same for all requests"
app := iris.New()
app.Controller("/", &testControllerPersistence{Data: data})
e := httptest.New(t, app)
e.GET("/").Expect().Status(httptest.StatusOK).
Body().Equal(data)
}
type testControllerBeginAndEndRequestFunc struct {
router.Controller
Username string
}
// called before every method (Get() or Post()).
//
// useful when more than one method uses the
// same request values or context's function calls.
func (t *testControllerBeginAndEndRequestFunc) BeginRequest(ctx context.Context) {
t.Username = ctx.Params().Get("username")
// or t.Params.Get("username") because the
// t.Ctx == ctx and is being initialized before this "BeginRequest"
}
// called after every method (Get() or Post()).
func (t *testControllerBeginAndEndRequestFunc) EndRequest(ctx context.Context) {
ctx.Writef("done") // append "done" to the response
}
func (t *testControllerBeginAndEndRequestFunc) Get() {
t.Ctx.Writef(t.Username)
}
func (t *testControllerBeginAndEndRequestFunc) Post() {
t.Ctx.Writef(t.Username)
}
func TestControllerBeginAndEndRequestFunc(t *testing.T) {
app := iris.New()
app.Controller("/profile/{username}", new(testControllerBeginAndEndRequestFunc))
e := httptest.New(t, app)
usernames := []string{
"kataras",
"makis",
"efi",
"rg",
"bill",
"whoisyourdaddy",
}
doneResponse := "done"
for _, username := range usernames {
e.GET("/profile/" + username).Expect().Status(httptest.StatusOK).
Body().Equal(username + doneResponse)
e.POST("/profile/" + username).Expect().Status(httptest.StatusOK).
Body().Equal(username + doneResponse)
}
}<|fim▁end|> | router.Controller |
<|file_name|>program_tree_version.py<|end_file_name|><|fim▁begin|>##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import contextlib
import warnings
from _decimal import Decimal
from typing import Optional, List
from django.db import IntegrityError
from django.db.models import F, Case, When, IntegerField, QuerySet, Max, OuterRef, Subquery
from django.db.models import Q
from base.models.academic_year import AcademicYear
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_categories import Categories
from education_group.ddd.domain.exception import TrainingNotFoundException
from education_group.models.group import Group
from education_group.models.group_year import GroupYear
from osis_common.ddd import interface
from osis_common.ddd.interface import RootEntity
from program_management import formatter
from program_management.ddd import command
from program_management.ddd.business_types import *
from program_management.ddd.domain import exception
from program_management.ddd.domain import program_tree
from program_management.ddd.domain import program_tree_version
from program_management.ddd.domain.exception import ProgramTreeVersionNotFoundException
from program_management.ddd.domain.program_tree_version import ProgramTreeVersionIdentity, STANDARD, NOT_A_TRANSITION
from program_management.ddd.dtos import UniteEnseignementDTO, ContenuNoeudDTO, ProgrammeDeFormationDTO
from program_management.ddd.repositories import program_tree as program_tree_repository
from program_management.models.education_group_version import EducationGroupVersion
class ProgramTreeVersionRepository(interface.AbstractRepository):
@classmethod
def save(cls, entity: RootEntity) -> None:
raise NotImplementedError
@classmethod
def create(
cls,
program_tree_version: 'ProgramTreeVersion',
**_
) -> 'ProgramTreeVersionIdentity':
warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2)
offer_acronym = program_tree_version.entity_id.offer_acronym
year = program_tree_version.entity_id.year
try:
education_group_year_id = EducationGroupYear.objects.filter(
acronym=offer_acronym,
academic_year__year=year,
).values_list(
'pk', flat=True
)[0]
except IndexError:
raise TrainingNotFoundException(acronym=offer_acronym, year=year)
group_year_id = GroupYear.objects.filter(
partial_acronym=program_tree_version.program_tree_identity.code,
academic_year__year=program_tree_version.program_tree_identity.year,
).values_list(
'pk', flat=True
)[0]
try:
educ_group_version = EducationGroupVersion.objects.create(
version_name=program_tree_version.version_name,
title_fr=program_tree_version.title_fr,
title_en=program_tree_version.title_en,
offer_id=education_group_year_id,
transition_name=program_tree_version.entity_id.transition_name,
root_group_id=group_year_id,
)
_update_start_year_and_end_year(
educ_group_version,
program_tree_version.start_year,
program_tree_version.end_year_of_existence
)
except IntegrityError as ie:
raise exception.ProgramTreeAlreadyExistsException
return program_tree_version.entity_id
@classmethod
def update(cls, program_tree_version: 'ProgramTreeVersion', **_) -> 'ProgramTreeVersionIdentity':
warnings.warn("DEPRECATED : use .save() function instead", DeprecationWarning, stacklevel=2)
obj = EducationGroupVersion.objects.get(
offer__acronym=program_tree_version.entity_identity.offer_acronym,
offer__academic_year__year=program_tree_version.entity_identity.year,
version_name=program_tree_version.entity_identity.version_name,
transition_name=program_tree_version.entity_identity.transition_name,
)
obj.version_name = program_tree_version.version_name
obj.title_fr = program_tree_version.title_fr
obj.title_en = program_tree_version.title_en
obj.save()
_update_start_year_and_end_year(
obj,
program_tree_version.start_year,
program_tree_version.end_year_of_existence
)
return program_tree_version.entity_id
@classmethod
def get(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion':
qs = _get_common_queryset().filter(
version_name=entity_id.version_name,
offer__acronym=entity_id.offer_acronym,
offer__academic_year__year=entity_id.year,
transition_name=entity_id.transition_name,
)<|fim▁hole|> return _instanciate_tree_version(qs.get())
except EducationGroupVersion.DoesNotExist:
raise exception.ProgramTreeVersionNotFoundException()
@classmethod
def get_last_in_past(cls, entity_id: 'ProgramTreeVersionIdentity') -> 'ProgramTreeVersion':
qs = EducationGroupVersion.objects.filter(
version_name=entity_id.version_name,
offer__acronym=entity_id.offer_acronym,
offer__academic_year__year__lt=entity_id.year,
transition_name=entity_id.transition_name
).order_by(
'offer__academic_year'
).values_list(
'offer__academic_year__year',
flat=True,
)
if qs:
last_past_year = qs.last()
last_identity = ProgramTreeVersionIdentity(
offer_acronym=entity_id.offer_acronym,
year=last_past_year,
version_name=entity_id.version_name,
transition_name=entity_id.transition_name,
)
return cls.get(entity_id=last_identity)
@classmethod
def search(
cls,
entity_ids: Optional[List['ProgramTreeVersionIdentity']] = None,
version_name: str = None,
offer_acronym: str = None,
transition_name: str = None,
code: str = None,
year: int = None,
**kwargs
) -> List['ProgramTreeVersion']:
qs = _get_common_queryset()
if "element_ids" in kwargs:
qs = qs.filter(root_group__element__in=kwargs['element_ids'])
if version_name is not None:
qs = qs.filter(version_name=version_name)
if offer_acronym is not None:
qs = qs.filter(offer__acronym=offer_acronym)
if transition_name is not None:
qs = qs.filter(transition_name=transition_name)
if year is not None:
qs = qs.filter(offer__academic_year__year=year)
if code is not None:
qs = qs.filter(root_group__partial_acronym=code)
results = []
for record_dict in qs:
results.append(_instanciate_tree_version(record_dict))
return results
@classmethod
def delete(
cls,
entity_id: 'ProgramTreeVersionIdentity',
delete_program_tree_service: interface.ApplicationService = None
) -> None:
program_tree_version = cls.get(entity_id)
EducationGroupVersion.objects.filter(
version_name=entity_id.version_name,
offer__acronym=entity_id.offer_acronym,
offer__academic_year__year=entity_id.year,
transition_name=entity_id.transition_name,
).delete()
root_node = program_tree_version.get_tree().root_node
cmd = command.DeleteProgramTreeCommand(code=root_node.code, year=root_node.year)
delete_program_tree_service(cmd)
@classmethod
def search_all_versions_from_root_node(cls, root_node_identity: 'NodeIdentity') -> List['ProgramTreeVersion']:
offer_ids = EducationGroupVersion.objects.filter(
root_group__partial_acronym=root_node_identity.code,
root_group__academic_year__year=root_node_identity.year
).values_list('offer_id', flat=True)
return _search_versions_from_offer_ids(list(offer_ids))
@classmethod
def search_all_versions_from_root_nodes(cls, node_identities: List['NodeIdentity']) -> List['ProgramTreeVersion']:
offer_ids = _search_by_node_entities(list(node_identities))
return _search_versions_from_offer_ids(offer_ids)
@classmethod
def search_versions_from_trees(cls, trees: List['ProgramTree']) -> List['ProgramTreeVersion']:
root_nodes_identities = [tree.root_node.entity_id for tree in trees]
tree_versions = cls.search_all_versions_from_root_nodes(root_nodes_identities)
result = []
for tree_version in tree_versions:
with contextlib.suppress(StopIteration):
tree_version.tree = next(tree for tree in trees if tree.entity_id == tree_version.program_tree_identity)
result.append(tree_version)
return result
@classmethod
def search_last_occurence(cls, from_year: int) -> List['ProgramTreeVersion']:
subquery_max_existing_year_for_offer = EducationGroupVersion.objects.filter(
offer__academic_year__year__gte=from_year,
offer__education_group=OuterRef("offer__education_group"),
version_name=OuterRef('version_name'),
transition_name=OuterRef('transition_name')
).values(
"offer__education_group"
).annotate(
max_year=Max("offer__academic_year__year")
).order_by(
"offer__education_group"
).values("max_year")
qs = _get_common_queryset().filter(
offer__academic_year__year=Subquery(subquery_max_existing_year_for_offer[:1])
)
results = []
for record_dict in qs:
results.append(_instanciate_tree_version(record_dict))
return results
@classmethod
def get_dto(cls, identity: ProgramTreeVersionIdentity) -> Optional['ProgrammeDeFormationDTO']:
pgm_tree_version = cls.get(identity)
return build_dto(pgm_tree_version, identity)
@classmethod
def get_dto_from_year_and_code(cls, code: str, year: int) -> Optional['ProgrammeDeFormationDTO']:
pgm_tree_version = cls.search(code=code, year=year)
if pgm_tree_version:
return build_dto(pgm_tree_version[0], pgm_tree_version[0].entity_identity)
raise ProgramTreeVersionNotFoundException
def _update_start_year_and_end_year(
educ_group_version: EducationGroupVersion,
start_year: int,
end_year_of_existence: int
):
# FIXME :: should add a field EducationgroupVersion.end_year
# FIXME :: and should remove GroupYear.end_year
# FIXME :: End_year is useful only for EducationGroupYear (training, minitraining) and programTreeVersions.
# FIXME :: End year is not useful for Groups. For business, Group doesn't have a 'end date'.
group = Group.objects.get(
groupyear__educationgroupversion__pk=educ_group_version.pk
)
end_year_id = None
if end_year_of_existence:
end_year_id = AcademicYear.objects.only('pk').get(year=end_year_of_existence).pk
group.end_year_id = end_year_id
group.start_year_id = AcademicYear.objects.only('pk').get(year=start_year).pk
group.save()
def _instanciate_tree_version(record_dict: dict) -> 'ProgramTreeVersion':
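# Map one annotated queryset row onto the ProgramTreeVersion aggregate.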
identity = program_tree_version.ProgramTreeVersionIdentity(
offer_acronym=record_dict['offer_acronym'],
year=record_dict['offer_year'],
version_name=record_dict['version_name'],
transition_name=record_dict['transition_name'],
)
return program_tree_version.ProgramTreeVersion(
entity_identity=identity,
entity_id=identity,
program_tree_identity=program_tree.ProgramTreeIdentity(record_dict['code'], record_dict['offer_year']),
program_tree_repository=program_tree_repository.ProgramTreeRepository(),
start_year=record_dict['start_year'],
title_fr=record_dict['version_title_fr'],
title_en=record_dict['version_title_en'],
end_year_of_existence=record_dict['end_year_of_existence'],
)
def _search_by_node_entities(entity_ids: List['NodeIdentity']) -> List[int]:
if bool(entity_ids):
qs = EducationGroupVersion.objects.all().values_list('offer_id', flat=True)
filter_search_from = _build_where_clause(entity_ids[0])
for identity in entity_ids[1:]:
filter_search_from |= _build_where_clause(identity)
qs = qs.filter(filter_search_from)
return list(qs)
return []
def _build_where_clause(node_identity: 'NodeIdentity') -> Q:
return Q(
Q(
root_group__partial_acronym=node_identity.code,
root_group__academic_year__year=node_identity.year
)
)
def _search_versions_from_offer_ids(offer_ids: List[int]) -> List['ProgramTreeVersion']:
qs = _get_common_queryset()
qs = qs.filter(
offer_id__in=offer_ids,
)
results = []
for record_dict in qs:
results.append(_instanciate_tree_version(record_dict))
return results
def _get_common_queryset() -> QuerySet:
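# Annotate each EducationGroupVersion with the fields needed to build the domain object; start/end years come from the offer for standard (non-transition) trainings and mini-trainings, otherwise from the root group.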
return EducationGroupVersion.objects.all().order_by(
'version_name'
).annotate(
code=F('root_group__partial_acronym'),
offer_acronym=F('offer__acronym'),
offer_year=F('offer__academic_year__year'),
version_title_fr=F('title_fr'),
version_title_en=F('title_en'),
# FIXME :: should add a field EducationgroupVersion.end_year
# FIXME :: and should remove GroupYear.end_year
# FIXME :: End_year is useful only for EducationGroupYear (training, minitraining) and programTreeVersions.
# FIXME :: End year is not useful for Groups. For business, Group doesn't have a 'end date'.
end_year_of_existence=Case(
When(
Q(
offer__education_group_type__category__in={
Categories.TRAINING.name, Categories.MINI_TRAINING.name
}
) & Q(
version_name=STANDARD
) & Q(
transition_name=NOT_A_TRANSITION
),
then=F('offer__education_group__end_year__year')
),
default=F('root_group__group__end_year__year'),
output_field=IntegerField(),
),
start_year=Case(
When(
Q(
offer__education_group_type__category__in={
Categories.TRAINING.name, Categories.MINI_TRAINING.name
}
) & Q(
version_name=STANDARD
) & Q(
transition_name=NOT_A_TRANSITION
),
then=F('offer__education_group__start_year__year')
),
default=F('root_group__group__start_year__year'),
output_field=IntegerField(),
),
).values(
'code',
'offer_acronym',
'offer_year',
'version_name',
'version_title_fr',
'version_title_en',
'transition_name',
'end_year_of_existence',
'start_year',
)
def build_dto(pgm_tree_version: 'ProgramTreeVersion', identity: ProgramTreeVersionIdentity) \
-> 'ProgrammeDeFormationDTO':
tree = pgm_tree_version.get_tree()
contenu = _build_contenu(tree.root_node)
return ProgrammeDeFormationDTO(
racine=contenu,
annee=identity.year,
sigle=identity.offer_acronym,
version=identity.version_name,
intitule_formation="{}{}".format(
tree.root_node.offer_title_fr,
"{}".format("[ {} ]".format(pgm_tree_version.title_fr) if pgm_tree_version.title_fr else '')
),
code=tree.entity_id.code,
transition_name=identity.transition_name
)
def _build_contenu(node: 'Node', lien_parent: 'Link' = None) -> 'ContenuNoeudDTO':
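# Walk the node's children: learning units map to UniteEnseignementDTO, groups recurse into nested ContenuNoeudDTO entries.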
contenu_ordonne = []
for lien in node.children:
if lien.child.is_learning_unit():
contenu_ordonne.append(
UniteEnseignementDTO(
bloc=lien.block,
code=lien.child.code,
intitule_complet=lien.child.title,
quadrimestre=lien.child.quadrimester,
quadrimestre_texte=lien.child.quadrimester.value if lien.child.quadrimester else "",
credits_absolus=lien.child.credits,
volume_annuel_pm=lien.child.volume_total_lecturing,
volume_annuel_pp=lien.child.volume_total_practical,
obligatoire=lien.is_mandatory if lien else False,
session_derogation='',
credits_relatifs=lien.relative_credits,
)
)
else:
groupement_contenu = _build_contenu(lien.child, lien_parent=lien)
contenu_ordonne.append(groupement_contenu)
return ContenuNoeudDTO(
code=node.code,
intitule=node.title,
remarque=node.remark_fr,
obligatoire=lien_parent.is_mandatory if lien_parent else False,
credits=_get_credits(lien_parent),
intitule_complet=get_verbose_title_group(node),
contenu_ordonne=contenu_ordonne,
)
def get_verbose_title_group(node: 'NodeGroupYear') -> str:
if node.is_finality():
return format_complete_title_label(node, node.offer_partial_title_fr)
if node.is_option():
return format_complete_title_label(node, node.offer_title_fr)
else:
return node.group_title_fr
def format_complete_title_label(node, title_fr) -> str:
version_complete_label = formatter.format_version_complete_name(node, "fr-be")
return "{}{}".format(title_fr, version_complete_label)
def _get_credits(link: 'Link') -> Optional[Decimal]:
if link:
return link.relative_credits or link.child.credits or 0
return None<|fim▁end|> | try: |
<|file_name|>healthcheck_test.go<|end_file_name|><|fim▁begin|>package integration
import (
"bytes"
"net/http"
"os"
"time"
"github.com/containous/traefik/integration/try"
"github.com/go-check/check"
checker "github.com/vdemeester/shakers"
)
// HealthCheck test suites (using libcompose)
type HealthCheckSuite struct {
BaseSuite
whoami1IP string
whoami2IP string
}
func (s *HealthCheckSuite) SetUpSuite(c *check.C) {
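// Start the compose project and record the IP addresses of the two whoami backends.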
s.createComposeProject(c, "healthcheck")
s.composeProject.Start(c)
s.whoami1IP = s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
s.whoami2IP = s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
}
func (s *HealthCheckSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/simple.toml", struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:test.localhost"))
c.Assert(err, checker.IsNil)
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
// Set all whoami health to 500
client := &http.Client{}
whoamiHosts := []string{s.whoami1IP, s.whoami2IP}
for _, whoami := range whoamiHosts {
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBuffer([]byte("500")))
c.Assert(err, checker.IsNil)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
}
// Waiting for Traefik healthcheck
try.Sleep(2 * time.Second)
// Verify no backend service is available due to failing health checks
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
// Change one whoami health to 200
statusOKReq1, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBuffer([]byte("200")))
c.Assert(err, checker.IsNil)
_, err = client.Do(statusOKReq1)
c.Assert(err, checker.IsNil)
<|fim▁hole|> frontendReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
frontendReq.Host = "test.localhost"
// Check if whoami1 responds
err = try.Request(frontendReq, 500*time.Millisecond, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.IsNil)
// Check that the service with the bad health check (whoami2) never responds.
err = try.Request(frontendReq, 2*time.Second, try.BodyContains(s.whoami2IP))
c.Assert(err, checker.Not(checker.IsNil))
// TODO validate : run on 80
resp, err := http.Get("http://127.0.0.1:8000/")
// Expected a 404 as we did not configure anything
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
}
func (s *HealthCheckSuite) TestMultipleEntrypointsWrr(c *check.C) {
s.doTestMultipleEntrypoints(c, "fixtures/healthcheck/multiple-entrypoints-wrr.toml")
}
func (s *HealthCheckSuite) TestMultipleEntrypointsDrr(c *check.C) {
s.doTestMultipleEntrypoints(c, "fixtures/healthcheck/multiple-entrypoints-drr.toml")
}
func (s *HealthCheckSuite) doTestMultipleEntrypoints(c *check.C, fixture string) {
file := s.adaptFile(c, fixture, struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(file)
cmd, _ := s.cmdTraefik(withConfigFile(file))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// Wait for traefik
err = try.GetRequest("http://localhost:8080/api/providers", 60*time.Second, try.BodyContains("Host:test.localhost"))
c.Assert(err, checker.IsNil)
// Check entrypoint http1
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
// Check entrypoint http2
frontendHealthReq, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/health", nil)
c.Assert(err, checker.IsNil)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
// Set one whoami health to 500
client := &http.Client{}
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBuffer([]byte("500")))
c.Assert(err, checker.IsNil)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
// Waiting for Traefik healthcheck
try.Sleep(2 * time.Second)
frontend1Req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
frontend1Req.Host = "test.localhost"
frontend2Req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/", nil)
c.Assert(err, checker.IsNil)
frontend2Req.Host = "test.localhost"
// Check if whoami1 never responds
err = try.Request(frontend2Req, 2*time.Second, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.Not(checker.IsNil))
// Check if whoami1 never responds
err = try.Request(frontend1Req, 2*time.Second, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.Not(checker.IsNil))
}<|fim▁end|> | // Verify frontend health : after
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
|
<|file_name|>BinaryTreeLevelOrderTraversal2.py<|end_file_name|><|fim▁begin|><|fim▁hole|># self.left = None
# self.right = None
class Solution(object):
def levelOrderBottom(self, root):
list = []
self.helper(list, root, 0)
return list[::-1]
def helper(self, list, root, level):
if root == None:
return
if level >= len(list):
list.append([])
list[level].append(root.val)
self.helper(list, root.left, level + 1)
self.helper(list, root.right, level + 1)
from TestObjects import *
b = BinaryTree()
s = Solution()
print s.levelOrderBottom(b.root)<|fim▁end|> | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x |
<|file_name|>TopologyTestRunResource.java<|end_file_name|><|fim▁begin|>/**
* Copyright 2017 Hortonworks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package com.hortonworks.streamline.streams.service;
import com.codahale.metrics.annotation.Timed;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.hortonworks.streamline.common.exception.service.exception.request.BadRequestException;
import com.hortonworks.streamline.common.exception.service.exception.request.EntityNotFoundException;
import com.hortonworks.streamline.common.exception.service.exception.server.UnhandledServerException;
import com.hortonworks.streamline.common.util.WSUtils;
import com.hortonworks.streamline.streams.actions.topology.service.TopologyActionsService;
import com.hortonworks.streamline.streams.catalog.Topology;
import com.hortonworks.streamline.streams.catalog.TopologySink;
import com.hortonworks.streamline.streams.catalog.TopologySource;
import com.hortonworks.streamline.streams.catalog.TopologyTestRunCase;
import com.hortonworks.streamline.streams.catalog.TopologyTestRunCaseSink;
import com.hortonworks.streamline.streams.catalog.TopologyTestRunCaseSource;
import com.hortonworks.streamline.streams.catalog.TopologyTestRunHistory;
import com.hortonworks.streamline.streams.catalog.service.StreamCatalogService;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.BooleanUtils;
import org.datanucleus.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import static java.util.stream.Collectors.toList;
import static javax.ws.rs.core.Response.Status.CREATED;
import static javax.ws.rs.core.Response.Status.OK;
@Path("/v1/catalog")
@Produces(MediaType.APPLICATION_JSON)
public class TopologyTestRunResource {
private static final Logger LOG = LoggerFactory.getLogger(TopologyTestRunResource.class);
private static final Integer DEFAULT_LIST_ENTITIES_COUNT = 5;
public static final Charset ENCODING_UTF_8 = Charset.forName("UTF-8");
private final StreamCatalogService catalogService;
private final TopologyActionsService actionsService;
private final ObjectMapper objectMapper;
public TopologyTestRunResource(StreamCatalogService catalogService, TopologyActionsService actionsService) {
this.catalogService = catalogService;
this.actionsService = actionsService;
this.objectMapper = new ObjectMapper();
}
@POST
@Path("/topologies/{topologyId}/actions/testrun")
@Timed
public Response testRunTopology (@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
String testRunInputJson) throws Exception {
Topology result = catalogService.getTopology(topologyId);
if (result != null) {
TopologyTestRunHistory history = actionsService.testRunTopology(result, testRunInputJson);
return WSUtils.respondEntity(history, OK);
}
throw EntityNotFoundException.byId(topologyId.toString());
}
@GET
@Path("/topologies/{topologyId}/testhistories")
@Timed
public Response getHistoriesOfTestRunTopology (@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@QueryParam("limit") Integer limit) throws Exception {
Collection<TopologyTestRunHistory> histories = catalogService.listTopologyTestRunHistory(topologyId);
if (histories == null) {
throw EntityNotFoundException.byFilter("topology id " + topologyId);
}
List<TopologyTestRunHistory> filteredHistories = filterHistories(limit, histories);
return WSUtils.respondEntities(filteredHistories, OK);
}
@GET
@Path("/topologies/{topologyId}/versions/{versionId}/testhistories")
@Timed
public Response getHistoriesOfTestRunTopology (@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("versionId") Long versionId,
@QueryParam("limit") Integer limit) throws Exception {
Collection<TopologyTestRunHistory> histories = catalogService.listTopologyTestRunHistory(topologyId, versionId);
if (histories == null) {
throw EntityNotFoundException.byFilter("topology id " + topologyId);
}
List<TopologyTestRunHistory> filteredHistories = filterHistories(limit, histories);
return WSUtils.respondEntities(filteredHistories, OK);
}
@GET
@Path("/topologies/{topologyId}/testhistories/{historyId}")
@Timed
public Response getHistoryOfTestRunTopology (@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("historyId") Long historyId,
@QueryParam("simplify") Boolean simplify) throws Exception {
TopologyTestRunHistory history = catalogService.getTopologyTestRunHistory(historyId);
if (history == null) {
throw EntityNotFoundException.byId(String.valueOf(historyId));
}
if (!history.getTopologyId().equals(topologyId)) {
throw BadRequestException.message("Test history " + historyId + " is not belong to topology " + topologyId);
}
if (BooleanUtils.isTrue(simplify)) {
return WSUtils.respondEntity(new SimplifiedTopologyTestRunHistory(history), OK);
} else {
return WSUtils.respondEntity(history, OK);
}
}
@GET
@Path("/topologies/{topologyId}/testhistories/{historyId}/events")
public Response getEventsOfTestRunTopologyHistory(@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("historyId") Long historyId) throws Exception {
return getEventsOfTestRunTopologyHistory(topologyId, historyId, null);
}
@GET
@Path("/topologies/{topologyId}/testhistories/{historyId}/events/{componentName}")
public Response getEventsOfTestRunTopologyHistory(@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("historyId") Long historyId,
@PathParam("componentName") String componentName) throws Exception {
return getEventsOfTestRunTopologyHistory(topologyId, historyId, componentName);
}
@GET
@Path("/topologies/{topologyId}/testhistories/{historyId}/events/download")
@Produces(MediaType.APPLICATION_OCTET_STREAM)
public Response downloadEventsOfTestRunTopologyHistory(@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("historyId") Long historyId) throws Exception {
File eventLogFile = getEventLogFile(topologyId, historyId);
String content = FileUtils.readFileToString(eventLogFile, ENCODING_UTF_8);
InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));
String fileName = String.format("events-topology-%d-history-%d.log", topologyId, historyId);
return Response.status(OK)
.entity(is)
.header("Content-Disposition", "attachment; filename=\"" + fileName + "\"")
.build();
}
private Response getEventsOfTestRunTopologyHistory(Long topologyId, Long historyId, String componentName) throws IOException {
File eventLogFile = getEventLogFile(topologyId, historyId);
List<String> lines = FileUtils.readLines(eventLogFile, ENCODING_UTF_8);
Stream<Map<String, Object>> eventsStream = lines.stream().map(line -> {
try {
return objectMapper.readValue(line, new TypeReference<Map<String, Object>>() {});
} catch (IOException e) {
throw new RuntimeException(e);
}
});
if (!StringUtils.isEmpty(componentName)) {
eventsStream = eventsStream.filter(event -> {
String eventComponentName = (String) event.get("componentName");
return eventComponentName != null && eventComponentName.equals(componentName);
});
}
return WSUtils.respondEntities(eventsStream.collect(toList()), OK);
}
private File getEventLogFile(Long topologyId, Long historyId) {
TopologyTestRunHistory history = catalogService.getTopologyTestRunHistory(historyId);
if (history == null) {
throw EntityNotFoundException.byId(String.valueOf(historyId));
}
if (!history.getTopologyId().equals(topologyId)) {
throw BadRequestException.message("Test history " + historyId + " is not belong to topology " + topologyId);
}
String eventLogFilePath = history.getEventLogFilePath();
File eventLogFile = new File(eventLogFilePath);
if (!eventLogFile.exists() || eventLogFile.isDirectory() || !eventLogFile.canRead()) {
throw BadRequestException.message("Event log file of history " + historyId + " does not exist or is not readable.");
}
return eventLogFile;
}
private List<TopologyTestRunHistory> filterHistories(Integer limit, Collection<TopologyTestRunHistory> histories) {
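// Newest histories first, truncated to the requested limit (5 when none is given).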
if (limit == null) {
limit = DEFAULT_LIST_ENTITIES_COUNT;
}
return histories.stream()
// reverse order
.sorted((h1, h2) -> (int) (h2.getId() - h1.getId()))
.limit(limit)
.collect(toList());
}
@POST
@Path("/topologies/{topologyId}/testcases")
public Response addTestRunCase(@PathParam("topologyId") Long topologyId,<|fim▁hole|> TopologyTestRunCase addedCase = catalogService.addTopologyTestRunCase(testRunCase);
return WSUtils.respondEntity(addedCase, CREATED);
}
@POST
@Path("/topologies/{topologyId}/versions/{versionId}/testcases")
public Response addTestRunCase(@PathParam("topologyId") Long topologyId,
@PathParam("versionId") Long versionId,
TopologyTestRunCase testRunCase) {
testRunCase.setTopologyId(topologyId);
testRunCase.setVersionId(versionId);
TopologyTestRunCase addedCase = catalogService.addTopologyTestRunCase(testRunCase);
return WSUtils.respondEntity(addedCase, CREATED);
}
@PUT
@Path("/topologies/{topologyId}/testcases/{testCaseId}")
public Response addOrUpdateTestRunCase(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
TopologyTestRunCase testRunCase) {
testRunCase.setTopologyId(topologyId);
testRunCase.setId(testCaseId);
TopologyTestRunCase updatedCase = catalogService.addOrUpdateTopologyTestRunCase(topologyId, testRunCase);
return WSUtils.respondEntity(updatedCase, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases/{testCaseId}")
public Response getTestRunCase(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId) {
TopologyTestRunCase testcase = catalogService.getTopologyTestRunCase(topologyId, testCaseId);
if (testcase == null) {
throw EntityNotFoundException.byId(Long.toString(testCaseId));
}
return WSUtils.respondEntity(testcase, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases")
@Timed
public Response listTestRunCases(@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@QueryParam("limit") Integer limit) throws Exception {
Long currentVersionId = catalogService.getCurrentVersionId(topologyId);
Collection<TopologyTestRunCase> cases = catalogService.listTopologyTestRunCase(topologyId, currentVersionId);
if (cases == null) {
throw EntityNotFoundException.byFilter("topology id " + topologyId);
}
List<TopologyTestRunCase> filteredCases = filterTestRunCases(limit, cases);
return WSUtils.respondEntities(filteredCases, OK);
}
@GET
@Path("/topologies/{topologyId}/versions/{versionId}/testcases")
@Timed
public Response listTestRunCases(@Context UriInfo urlInfo,
@PathParam("topologyId") Long topologyId,
@PathParam("versionId") Long versionId,
@QueryParam("limit") Integer limit) throws Exception {
Collection<TopologyTestRunCase> cases = catalogService.listTopologyTestRunCase(topologyId, versionId);
if (cases == null) {
throw EntityNotFoundException.byFilter("topology id " + topologyId);
}
List<TopologyTestRunCase> filteredCases = filterTestRunCases(limit, cases);
return WSUtils.respondEntities(filteredCases, OK);
}
@DELETE
@Path("/topologies/{topologyId}/testcases/{testCaseId}")
public Response removeTestRunCase(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId) {
TopologyTestRunCase testRunCase = catalogService.removeTestRunCase(topologyId, testCaseId);
if (testRunCase != null) {
return WSUtils.respondEntity(testRunCase, OK);
}
throw EntityNotFoundException.byId(testCaseId.toString());
}
private List<TopologyTestRunCase> filterTestRunCases(Integer limit, Collection<TopologyTestRunCase> cases) {
if (limit == null) {
limit = DEFAULT_LIST_ENTITIES_COUNT;
}
return cases.stream()
// reverse order
.sorted((h1, h2) -> (int) (h2.getId() - h1.getId()))
.limit(limit)
.collect(toList());
}
@POST
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sources")
public Response addTestRunCaseSource(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
TopologyTestRunCaseSource testRunCaseSource) {
TopologySource topologySource = getAssociatedTopologySource(topologyId, testCaseId, testRunCaseSource.getSourceId());
testRunCaseSource.setVersionId(topologySource.getVersionId());
TopologyTestRunCaseSource addedCaseSource = catalogService.addTopologyTestRunCaseSource(testRunCaseSource);
return WSUtils.respondEntity(addedCaseSource, CREATED);
}
@PUT
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sources/{id}")
public Response addOrUpdateTestRunCaseSource(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
@PathParam("id") Long id,
TopologyTestRunCaseSource testRunCaseSource) {
testRunCaseSource.setId(id);
testRunCaseSource.setTestCaseId(testCaseId);
TopologySource topologySource = getAssociatedTopologySource(topologyId, testCaseId, testRunCaseSource.getSourceId());
testRunCaseSource.setVersionId(topologySource.getVersionId());
TopologyTestRunCaseSource updatedCase = catalogService.addOrUpdateTopologyTestRunCaseSource(testRunCaseSource.getId(), testRunCaseSource);
return WSUtils.respondEntity(updatedCase, OK);
}
private TopologySource getAssociatedTopologySource(Long topologyId, Long testCaseId, Long topologySourceId) {
TopologyTestRunCase testCase = catalogService.getTopologyTestRunCase(topologyId, testCaseId);
if (testCase == null) {
throw EntityNotFoundException.byId("Topology test case with topology id " + topologyId +
" and test case id " + testCaseId);
}
TopologySource topologySource = catalogService.getTopologySource(topologyId, topologySourceId,
testCase.getVersionId());
if (topologySource == null) {
throw EntityNotFoundException.byId("Topology source with topology id " + topologyId +
" and version id " + testCase.getVersionId());
} else if (!testCase.getVersionId().equals(topologySource.getVersionId())) {
throw new IllegalStateException("Test case and topology source point to the different version id: "
+ "version id of test case: " + testCase.getVersionId() + " / "
+ "version id of topology source: " + topologySource.getVersionId());
}
return topologySource;
}
@GET
@Path("/topologies/{topologyId}/testcases/{testcaseId}/sources/{id}")
public Response getTestRunCaseSource(@PathParam("topologyId") Long topologyId,
@PathParam("testcaseId") Long testcaseId,
@PathParam("id") Long id) {
TopologyTestRunCaseSource testCaseSource = catalogService.getTopologyTestRunCaseSource(testcaseId, id);
if (testCaseSource == null) {
throw EntityNotFoundException.byId(Long.toString(id));
}
return WSUtils.respondEntity(testCaseSource, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sources/topologysource/{sourceId}")
public Response getTestRunCaseSourceByTopologySource(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
@PathParam("sourceId") Long sourceId) {
TopologyTestRunCaseSource testCaseSource = catalogService.getTopologyTestRunCaseSourceBySourceId(testCaseId, sourceId);
if (testCaseSource == null) {
throw EntityNotFoundException.byId("test case id: " + testCaseId + " , topology source id: " + sourceId);
}
return WSUtils.respondEntity(testCaseSource, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sources")
public Response listTestRunCaseSource(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId) {
Collection<TopologyTestRunCaseSource> sources = catalogService.listTopologyTestRunCaseSource(topologyId, testCaseId);
if (sources == null) {
throw EntityNotFoundException.byFilter("topologyId: " + topologyId + " / testCaseId: " + testCaseId);
}
return WSUtils.respondEntities(sources, OK);
}
@POST
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sinks")
public Response addTestRunCaseSink(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
TopologyTestRunCaseSink testRunCaseSink) {
TopologySink topologySink = getAssociatedTopologySink(topologyId, testCaseId, testRunCaseSink.getSinkId());
testRunCaseSink.setVersionId(topologySink.getVersionId());
TopologyTestRunCaseSink addedCaseSink = catalogService.addTopologyTestRunCaseSink(testRunCaseSink);
return WSUtils.respondEntity(addedCaseSink, CREATED);
}
@PUT
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sinks/{id}")
public Response addOrUpdateTestRunCaseSink(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
@PathParam("id") Long id,
TopologyTestRunCaseSink testRunCaseSink) {
testRunCaseSink.setId(id);
testRunCaseSink.setTestCaseId(testCaseId);
TopologySink topologySink = getAssociatedTopologySink(topologyId, testCaseId, testRunCaseSink.getSinkId());
testRunCaseSink.setVersionId(topologySink.getVersionId());
TopologyTestRunCaseSink updatedCase = catalogService.addOrUpdateTopologyTestRunCaseSink(testRunCaseSink.getId(), testRunCaseSink);
return WSUtils.respondEntity(updatedCase, OK);
}
private TopologySink getAssociatedTopologySink(Long topologyId, Long testCaseId, Long topologySinkId) {
TopologyTestRunCase testCase = catalogService.getTopologyTestRunCase(topologyId, testCaseId);
if (testCase == null) {
throw EntityNotFoundException.byId("Topology test case with topology id " + topologyId +
" and test case id " + testCaseId);
}
TopologySink topologySink = catalogService.getTopologySink(topologyId, topologySinkId,
testCase.getVersionId());
if (topologySink == null) {
throw EntityNotFoundException.byId("Topology sink with topology id " + topologyId +
" and version id " + testCase.getVersionId());
} else if (!testCase.getVersionId().equals(topologySink.getVersionId())) {
throw new IllegalStateException("Test case and topology sink point to the different version id: "
+ "version id of test case: " + testCase.getVersionId() + " / "
+ "version id of topology sink: " + topologySink.getVersionId());
}
return topologySink;
}
@GET
@Path("/topologies/{topologyId}/testcases/{testcaseId}/sinks/{id}")
public Response getTestRunCaseSink(@PathParam("topologyId") Long topologyId,
@PathParam("testcaseId") Long testcaseId,
@PathParam("id") Long id) {
TopologyTestRunCaseSink testCaseSink = catalogService.getTopologyTestRunCaseSink(testcaseId, id);
if (testCaseSink == null) {
throw EntityNotFoundException.byId(Long.toString(id));
}
return WSUtils.respondEntity(testCaseSink, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sinks/topologysink/{sinkId}")
public Response getTestRunCaseSinkByTopologySink(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId,
@PathParam("sinkId") Long sinkId) {
TopologyTestRunCaseSink testCaseSink = catalogService.getTopologyTestRunCaseSinkBySinkId(testCaseId, sinkId);
if (testCaseSink == null) {
throw EntityNotFoundException.byId("test case id: " + testCaseId + " , topology source id: " + sinkId);
}
return WSUtils.respondEntity(testCaseSink, OK);
}
@GET
@Path("/topologies/{topologyId}/testcases/{testCaseId}/sinks")
public Response listTestRunCaseSink(@PathParam("topologyId") Long topologyId,
@PathParam("testCaseId") Long testCaseId) {
Collection<TopologyTestRunCaseSink> sources = catalogService.listTopologyTestRunCaseSink(topologyId, testCaseId);
if (sources == null) {
throw EntityNotFoundException.byFilter("topologyId: " + topologyId + " / testCaseId: " + testCaseId);
}
return WSUtils.respondEntities(sources, OK);
}
private static class SimplifiedTopologyTestRunHistory {
private Long id;
private Long topologyId;
private Long versionId;
private Boolean finished = false;
private Boolean success = false;
private Boolean matched = false;
private Long startTime;
private Long finishTime;
private Long timestamp;
SimplifiedTopologyTestRunHistory(TopologyTestRunHistory history) {
id = history.getId();
topologyId = history.getTopologyId();
versionId = history.getVersionId();
finished = history.getFinished();
success = history.getSuccess();
matched = history.getMatched();
startTime = history.getStartTime();
finishTime = history.getFinishTime();
timestamp = history.getTimestamp();
}
public Long getId() {
return id;
}
public Long getTopologyId() {
return topologyId;
}
public Long getVersionId() {
return versionId;
}
public Boolean getFinished() {
return finished;
}
public Boolean getSuccess() {
return success;
}
public Boolean getMatched() {
return matched;
}
public Long getStartTime() {
return startTime;
}
public Long getFinishTime() {
return finishTime;
}
public Long getTimestamp() {
return timestamp;
}
}
}<|fim▁end|> | TopologyTestRunCase testRunCase) {
testRunCase.setTopologyId(topologyId);
Long currentVersionId = catalogService.getCurrentVersionId(topologyId);
testRunCase.setVersionId(currentVersionId); |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
#[macro_use]
extern crate bitflags;
extern crate ipc_channel;
extern crate msg;
extern crate rustc_serialize;
extern crate serde;
extern crate url;
extern crate hyper;
extern crate util;
extern crate time;
use rustc_serialize::{Decodable, Decoder};
use msg::constellation_msg::{PipelineId, WorkerId};
use util::str::DOMString;
use url::Url;
use hyper::header::Headers;
use hyper::http::RawStatus;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use time::Duration;
use std::net::TcpStream;
// Information would be attached to NewGlobal to be received and show in devtools.
// Extend these fields if we need more information.
#[derive(Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: DOMString,
pub url: Url
}
/// Messages to instruct the devtools server to update its known actors/state
/// according to changes in the browser.
pub enum DevtoolsControlMsg {
FromChrome(ChromeToDevtoolsControlMsg),
FromScript(ScriptToDevtoolsControlMsg),
}
pub enum ChromeToDevtoolsControlMsg {
AddClient(TcpStream),
FramerateTick(String, f64),
ServerExitMsg,
NetworkEventMessage(String, NetworkEvent),
}
#[derive(Deserialize, Serialize)]
pub enum ScriptToDevtoolsControlMsg {
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
SendConsoleMessage(PipelineId, ConsoleMessage, Option<WorkerId>),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue { class: String, uuid: String },
}
#[derive(Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
#[derive(PartialEq, Eq, Deserialize, Serialize)]
pub enum TracingMetadata {
Default,
IntervalStart,
IntervalEnd,
Event,
EventBacktrace,
}
#[derive(Deserialize, Serialize)]
pub struct TimelineMarker {
pub name: String,
pub metadata: TracingMetadata,
pub time: PreciseTime,
pub stack: Option<Vec<()>>,
}
#[derive(PartialEq, Eq, Hash, Clone, Deserialize, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// Messages to process in a particular script task, as instructed by a devtools client.
#[derive(Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
GetRootNode(PipelineId, IpcSender<NodeInfo>),
GetDocumentElement(PipelineId, IpcSender<NodeInfo>),
GetChildren(PipelineId, String, IpcSender<Vec<NodeInfo>>),
GetLayout(PipelineId, String, IpcSender<(f32, f32)>),
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
ModifyAttribute(PipelineId, String, Vec<Modification>),
WantsLiveNotifications(PipelineId, bool),
SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<TimelineMarker>),
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
RequestAnimationFrame(PipelineId, IpcSender<f64>),
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
impl Decodable for Modification {
fn decode<D: Decoder>(d: &mut D) -> Result<Modification, D::Error> {
d.read_struct("Modification", 2, |d|
Ok(Modification {
attributeName: try!(d.read_struct_field("attributeName", 0, |d| Decodable::decode(d))),
newValue: match d.read_struct_field("newValue", 1, |d| Decodable::decode(d)) {
Ok(opt) => opt,
Err(_) => None
}
})
)
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: u32,
pub columnNumber: u32,
}
bitflags! {
#[derive(Deserialize, Serialize)]
flags CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0,
const CONSOLE_API = 1 << 1,
}
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct PageError {
pub _type: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,<|fim▁hole|> pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct ConsoleAPI {
pub _type: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Clone)]
pub enum NetworkEvent {
HttpRequest(Url, Method, Headers, Option<Vec<u8>>),
HttpResponse(Option<Headers>, Option<RawStatus>, Option<Vec<u8>>)
}
impl TimelineMarker {
pub fn new(name: String, metadata: TracingMetadata) -> TimelineMarker {
TimelineMarker {
name: name,
metadata: metadata,
time: PreciseTime::now(),
stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn to(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}<|fim▁end|> | pub timeStamp: u64,
pub error: bool, |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>try:
import arcpy.mapping
from ._publishing import (convert_desktop_map_to_service_draft as convert_map_to_service_draft,<|fim▁hole|><|fim▁end|> | convert_toolbox_to_service_draft)
except:
from ._publishing import (convert_pro_map_to_service_draft as convert_map_to_service_draft,
convert_toolbox_to_service_draft) |
<|file_name|>http.rs<|end_file_name|><|fim▁begin|>//! Pieces pertaining to the HTTP message protocol.
use std::borrow::Cow::{Borrowed, Owned};
use std::borrow::IntoCow;
use std::cmp::min;
use std::old_io::{self, Reader, IoResult, BufWriter};
use std::num::from_u16;
use std::str::{self, FromStr};
use std::string::CowString;
use url::Url;
use url::ParseError as UrlError;
use method;
use status::StatusCode;
use uri;
use uri::RequestUri::{AbsolutePath, AbsoluteUri, Authority, Star};
use version::HttpVersion;
use version::HttpVersion::{Http09, Http10, Http11, Http20};
use HttpError::{HttpHeaderError, HttpIoError, HttpMethodError, HttpStatusError,
HttpUriError, HttpVersionError};
use HttpResult;
use self::HttpReader::{SizedReader, ChunkedReader, EofReader, EmptyReader};
use self::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter};
/// Readers to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
pub enum HttpReader<R> {
/// A Reader used when a Content-Length header is passed with a positive integer.
SizedReader(R, u64),
/// A Reader used when Transfer-Encoding is `chunked`.
ChunkedReader(R, Option<u64>),
/// A Reader used for responses that don't indicate a length or chunked.
///
    /// Note: This should only be used for `Response`s. It is illegal for a
    /// `Request` to be made with both `Content-Length` and
    /// `Transfer-Encoding: chunked` missing, as explained in the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
EofReader(R),
/// A Reader used for messages that should never have a body.
///
/// See https://tools.ietf.org/html/rfc7230#section-3.3.3
EmptyReader(R),
}
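// A rough sketch of how a caller picks a variant (the real selection logic lives
// with the header parsing; `stream`, `is_chunked` and `content_length` are
// assumed values here):
//
//     let reader = if is_chunked {
//         ChunkedReader(stream, None)
//     } else if let Some(len) = content_length {
//         SizedReader(stream, len)
//     } else {
//         EofReader(stream)
//     };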
impl<R: Reader> HttpReader<R> {
/// Unwraps this HttpReader and returns the underlying Reader.
pub fn unwrap(self) -> R {
match self {
SizedReader(r, _) => r,
ChunkedReader(r, _) => r,
EofReader(r) => r,
EmptyReader(r) => r,
}
}
}
impl<R: Reader> Reader for HttpReader<R> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
match *self {
SizedReader(ref mut body, ref mut remaining) => {
debug!("Sized read, remaining={:?}", remaining);
if *remaining == 0 {
Err(old_io::standard_error(old_io::EndOfFile))
} else {
let num = try!(body.read(buf)) as u64;
if num > *remaining {
*remaining = 0;
} else {
*remaining -= num;
}
Ok(num as usize)
}
},
ChunkedReader(ref mut body, ref mut opt_remaining) => {
let mut rem = match *opt_remaining {
Some(ref rem) => *rem,
// None means we don't know the size of the next chunk
None => try!(read_chunk_size(body))
};
debug!("Chunked read, remaining={:?}", rem);
if rem == 0 {
*opt_remaining = Some(0);
// chunk of size 0 signals the end of the chunked stream
// if the 0 digit was missing from the stream, it would
// be an InvalidInput error instead.
debug!("end of chunked");
return Err(old_io::standard_error(old_io::EndOfFile));
}
let to_read = min(rem as usize, buf.len());
let count = try!(body.read(&mut buf[..to_read])) as u64;
rem -= count;
*opt_remaining = if rem > 0 {
Some(rem)
} else {
try!(eat(body, LINE_ENDING.as_bytes()));
None
};
Ok(count as usize)
},
EofReader(ref mut body) => {
body.read(buf)
},
EmptyReader(_) => Err(old_io::standard_error(old_io::EndOfFile))
}
}
}
fn eat<R: Reader>(rdr: &mut R, bytes: &[u8]) -> IoResult<()> {
for &b in bytes.iter() {
match try!(rdr.read_byte()) {
byte if byte == b => (),
_ => return Err(old_io::standard_error(old_io::InvalidInput))
}
}
Ok(())
}
/// Chunked chunks start with 1*HEXDIGIT, indicating the size of the chunk.
fn read_chunk_size<R: Reader>(rdr: &mut R) -> IoResult<u64> {
let mut size = 0u64;
let radix = 16;
let mut in_ext = false;
let mut in_chunk_size = true;
loop {
match try!(rdr.read_byte()) {
b@b'0'...b'9' if in_chunk_size => {
size *= radix;
size += (b - b'0') as u64;
},
b@b'a'...b'f' if in_chunk_size => {
size *= radix;
size += (b + 10 - b'a') as u64;
},
b@b'A'...b'F' if in_chunk_size => {
size *= radix;
size += (b + 10 - b'A') as u64;
},
CR => {
match try!(rdr.read_byte()) {
LF => break,
_ => return Err(old_io::standard_error(old_io::InvalidInput))
}
},
// If we weren't in the extension yet, the ";" signals its start
b';' if !in_ext => {
in_ext = true;
in_chunk_size = false;
},
// "Linear white space" is ignored between the chunk size and the
// extension separator token (";") due to the "implied *LWS rule".
b'\t' | b' ' if !in_ext & !in_chunk_size => {},
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' if in_chunk_size => in_chunk_size = false,
// We allow any arbitrary octet once we are in the extension, since
// they all get ignored anyway. According to the HTTP spec, valid
// extensions would have a more strict syntax:
// (token ["=" (token | quoted-string)])
// but we gain nothing by rejecting an otherwise valid chunk size.
ext if in_ext => {
todo!("chunk extension byte={}", ext);
},
// Finally, if we aren't in the extension and we're reading any
// other octet, the chunk size line is invalid!
_ => {
return Err(old_io::standard_error(old_io::InvalidInput));
}
}
}
debug!("chunk size={:?}", size);
Ok(size)
}
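// For example: a stream starting with "A\r\n" yields Ok(10), "Ff\r\n" yields
// Ok(255), and "1e;name=value\r\n" yields Ok(30), since everything after ';' is
// treated as an ignored chunk extension.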
/// Writers to handle different Transfer-Encodings.
pub enum HttpWriter<W: Writer> {
/// A no-op Writer, used initially before Transfer-Encoding is determined.
ThroughWriter(W),
/// A Writer for when Transfer-Encoding includes `chunked`.
ChunkedWriter(W),
/// A Writer for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
SizedWriter(W, u64),
/// A writer that should not write any body.
EmptyWriter(W),
}
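// Minimal sketch of the chunked framing these writers produce (`stream` is an
// assumed `Writer`):
//
//     let mut w = ChunkedWriter(stream);
//     w.write_all(b"hello").unwrap();  // writes "5\r\nhello\r\n"
//     let stream = w.end().unwrap();   // writes the terminating chunk "0\r\n\r\n"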
impl<W: Writer> HttpWriter<W> {
/// Unwraps the HttpWriter and returns the underlying Writer.
#[inline]
pub fn unwrap(self) -> W {
match self {
ThroughWriter(w) => w,
ChunkedWriter(w) => w,
SizedWriter(w, _) => w,
EmptyWriter(w) => w,
}
}
/// Access the inner Writer.
#[inline]
pub fn get_ref<'a>(&'a self) -> &'a W {
match *self {
ThroughWriter(ref w) => w,
ChunkedWriter(ref w) => w,
SizedWriter(ref w, _) => w,
EmptyWriter(ref w) => w,
}
}
/// Access the inner Writer mutably.
///
/// Warning: You should not write to this directly, as you can corrupt
/// the state.
#[inline]
pub fn get_mut<'a>(&'a mut self) -> &'a mut W {
match *self {
ThroughWriter(ref mut w) => w,
ChunkedWriter(ref mut w) => w,
SizedWriter(ref mut w, _) => w,
EmptyWriter(ref mut w) => w,
}
}
/// Ends the HttpWriter, and returns the underlying Writer.
///
/// A final `write_all()` is called with an empty message, and then flushed.
/// The ChunkedWriter variant will use this to write the 0-sized last-chunk.
#[inline]
pub fn end(mut self) -> IoResult<W> {
try!(self.write_all(&[]));
try!(self.flush());
Ok(self.unwrap())
}
}
impl<W: Writer> Writer for HttpWriter<W> {
#[inline]
fn write_all(&mut self, msg: &[u8]) -> IoResult<()> {
match *self {
ThroughWriter(ref mut w) => w.write_all(msg),
ChunkedWriter(ref mut w) => {
let chunk_size = msg.len();
debug!("chunked write, size = {:?}", chunk_size);
try!(write!(w, "{:X}{}", chunk_size, LINE_ENDING));
try!(w.write_all(msg));
w.write_str(LINE_ENDING)
},
SizedWriter(ref mut w, ref mut remaining) => {
let len = msg.len() as u64;
if len > *remaining {
let len = *remaining;
*remaining = 0;
try!(w.write_all(&msg[..len as usize]));
Err(old_io::standard_error(old_io::ShortWrite(len as usize)))
} else {
*remaining -= len;
w.write_all(msg)
}
},
EmptyWriter(..) => {
let bytes = msg.len();
if bytes == 0 {
Ok(())
} else {
Err(old_io::IoError {
kind: old_io::ShortWrite(bytes),
desc: "EmptyWriter cannot write any bytes",
detail: Some("Cannot include a body with this kind of message".to_string())
})
}
}
}
}
#[inline]
fn flush(&mut self) -> IoResult<()> {
match *self {
ThroughWriter(ref mut w) => w.flush(),
ChunkedWriter(ref mut w) => w.flush(),
SizedWriter(ref mut w, _) => w.flush(),
EmptyWriter(ref mut w) => w.flush(),
}
}
}
pub const SP: u8 = b' ';
pub const CR: u8 = b'\r';
pub const LF: u8 = b'\n';
pub const STAR: u8 = b'*';
pub const LINE_ENDING: &'static str = "\r\n";
/// Determines if byte is a token char.
///
/// > ```notrust
/// > token = 1*tchar
/// >
/// > tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
/// > / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
/// > / DIGIT / ALPHA
/// > ; any VCHAR, except delimiters
/// > ```
#[inline]
pub fn is_token(b: u8) -> bool {
match b {
b'a'...b'z' |
b'A'...b'Z' |
b'0'...b'9' |
b'!' |
b'#' |
b'$' |
b'%' |
b'&' |
b'\''|
b'*' |
b'+' |
b'-' |
b'.' |
b'^' |
b'_' |
b'`' |
b'|' |
b'~' => true,
_ => false
}
}
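// For instance, `is_token(b'a')`, `is_token(b'7')` and `is_token(b'!')` are true,
// while delimiters such as `is_token(b' ')`, `is_token(b':')` and `is_token(b'"')`
// are false.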
/// Read token bytes from `stream` into `buf` until a space is encountered.
/// Returns `Ok(true)` if we read until a space,
/// `Ok(false)` if we got to the end of `buf` without encountering a space,
/// otherwise returns any error encountered reading the stream.
///
/// The remaining contents of `buf` are left untouched.
fn read_token_until_space<R: Reader>(stream: &mut R, buf: &mut [u8]) -> HttpResult<bool> {
use std::old_io::BufWriter;
let mut bufwrt = BufWriter::new(buf);
loop {
let byte = try!(stream.read_byte());
if byte == SP {
break;
} else if !is_token(byte) {
return Err(HttpMethodError);
// Read to end but there's still more
} else if bufwrt.write_u8(byte).is_err() {
return Ok(false);
}
}
if bufwrt.tell().unwrap() == 0 {
return Err(HttpMethodError);
}
Ok(true)
}
/// Read a `Method` from a raw stream, such as `GET`.
/// ### Note:
/// Extension methods are only parsed to 16 characters.
pub fn read_method<R: Reader>(stream: &mut R) -> HttpResult<method::Method> {
let mut buf = [SP; 16];
if !try!(read_token_until_space(stream, &mut buf)) {
return Err(HttpMethodError);
}
let maybe_method = match &buf[0..7] {
b"GET " => Some(method::Method::Get),
b"PUT " => Some(method::Method::Put),
b"POST " => Some(method::Method::Post),
b"HEAD " => Some(method::Method::Head),
b"PATCH " => Some(method::Method::Patch),
b"TRACE " => Some(method::Method::Trace),
b"DELETE " => Some(method::Method::Delete),
b"CONNECT" => Some(method::Method::Connect),
b"OPTIONS" => Some(method::Method::Options),
_ => None,
};
debug!("maybe_method = {:?}", maybe_method);
match (maybe_method, &buf[]) {
(Some(method), _) => Ok(method),
(None, ext) => {
// We already checked that the buffer is ASCII
Ok(method::Method::Extension(unsafe { str::from_utf8_unchecked(ext) }.trim().to_string()))
},
}
}
/// Read a `RequestUri` from a raw stream.
pub fn read_uri<R: Reader>(stream: &mut R) -> HttpResult<uri::RequestUri> {
let mut b = try!(stream.read_byte());
while b == SP {
b = try!(stream.read_byte());
}
let mut s = String::new();
if b == STAR {
try!(expect(stream.read_byte(), SP));
return Ok(Star)
} else {
s.push(b as char);
loop {
match try!(stream.read_byte()) {
SP => {
break;
},
CR | LF => {
return Err(HttpUriError(UrlError::InvalidCharacter))
},
b => s.push(b as char)
}
}
}
debug!("uri buf = {:?}", s);
if s.as_slice().starts_with("/") {
Ok(AbsolutePath(s))
} else if s.as_slice().contains("/") {
Ok(AbsoluteUri(try!(Url::parse(s.as_slice()))))
} else {
let mut temp = "http://".to_string();
temp.push_str(s.as_slice());
try!(Url::parse(temp.as_slice()));
todo!("compare vs u.authority()");
Ok(Authority(s))
}
}
/// Read the `HttpVersion` from a raw stream, such as `HTTP/1.1`.
pub fn read_http_version<R: Reader>(stream: &mut R) -> HttpResult<HttpVersion> {
try!(expect(stream.read_byte(), b'H'));
try!(expect(stream.read_byte(), b'T'));
try!(expect(stream.read_byte(), b'T'));
try!(expect(stream.read_byte(), b'P'));
try!(expect(stream.read_byte(), b'/'));
match try!(stream.read_byte()) {
b'0' => {
try!(expect(stream.read_byte(), b'.'));
try!(expect(stream.read_byte(), b'9'));
Ok(Http09)
},
b'1' => {
try!(expect(stream.read_byte(), b'.'));
match try!(stream.read_byte()) {
b'0' => Ok(Http10),
b'1' => Ok(Http11),
_ => Err(HttpVersionError)
}
},
b'2' => {
try!(expect(stream.read_byte(), b'.'));
try!(expect(stream.read_byte(), b'0'));
Ok(Http20)
},
_ => Err(HttpVersionError)
}
}
const MAX_HEADER_NAME_LENGTH: usize = 100;
const MAX_HEADER_FIELD_LENGTH: usize = 4096;
/// The raw bytes when parsing a header line.
///
/// A String and Vec<u8>, divided by COLON (`:`). The String is guaranteed
/// to be all `token`s. See `is_token` source for all valid characters.
pub type RawHeaderLine = (String, Vec<u8>);
/// Read a RawHeaderLine from a Reader.
///
/// From [spec](https://tools.ietf.org/html/rfc7230#section-3.2):
///
/// > Each header field consists of a case-insensitive field name followed
/// > by a colon (":"), optional leading whitespace, the field value, and
/// > optional trailing whitespace.
/// >
/// > ```notrust
/// > header-field = field-name ":" OWS field-value OWS
/// >
/// > field-name = token
/// > field-value = *( field-content / obs-fold )
/// > field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
/// > field-vchar = VCHAR / obs-text
/// >
/// > obs-fold = CRLF 1*( SP / HTAB )
/// > ; obsolete line folding
/// > ; see Section 3.2.4
/// > ```
pub fn read_header<R: Reader>(stream: &mut R) -> HttpResult<Option<RawHeaderLine>> {
let mut name = String::new();
let mut value = vec![];
loop {
match try!(stream.read_byte()) {
CR if name.len() == 0 => {
match try!(stream.read_byte()) {
LF => return Ok(None),
_ => return Err(HttpHeaderError)
}
},
b':' => break,
b if is_token(b) => {
if name.len() > MAX_HEADER_NAME_LENGTH { return Err(HttpHeaderError); }
name.push(b as char)
},
_nontoken => return Err(HttpHeaderError)
};
}
debug!("header name = {:?}", name);
let mut ows = true; //optional whitespace
todo!("handle obs-folding (gross!)");
loop {
match try!(stream.read_byte()) {
CR => break,
LF => return Err(HttpHeaderError),
b' ' if ows => {},
b => {
ows = false;
if value.len() > MAX_HEADER_FIELD_LENGTH { return Err(HttpHeaderError); }
value.push(b)
}
};
}
// Remove optional trailing whitespace
let real_len = value.len() - value.iter().rev().take_while(|&&x| b' ' == x).count();
value.truncate(real_len);
match try!(stream.read_byte()) {
LF => Ok(Some((name, value))),
_ => Err(HttpHeaderError)
}
}
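// Example: a stream containing "Host: example.com\r\n" parses to
// Ok(Some(("Host".to_string(), b"example.com".to_vec()))), while a bare "\r\n"
// (the blank line that ends the header section) parses to Ok(None).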
/// `request-line = method SP request-target SP HTTP-version CRLF`
pub type RequestLine = (method::Method, uri::RequestUri, HttpVersion);
/// Read the `RequestLine`, such as `GET / HTTP/1.1`.
pub fn read_request_line<R: Reader>(stream: &mut R) -> HttpResult<RequestLine> {
debug!("read request line");
let method = try!(read_method(stream));
debug!("method = {:?}", method);
let uri = try!(read_uri(stream));
debug!("uri = {:?}", uri);
let version = try!(read_http_version(stream));
debug!("version = {:?}", version);
if try!(stream.read_byte()) != CR {
return Err(HttpVersionError);
}
if try!(stream.read_byte()) != LF {
return Err(HttpVersionError);
}
Ok((method, uri, version))
}
/// `status-line = HTTP-version SP status-code SP reason-phrase CRLF`
///
/// However, the reason-phrase is absolutely useless, so it's tossed.
pub type StatusLine = (HttpVersion, RawStatus);
/// The raw status code and reason-phrase.
#[derive(PartialEq, Show)]
pub struct RawStatus(pub u16, pub CowString<'static>);
impl Clone for RawStatus {
fn clone(&self) -> RawStatus {
RawStatus(self.0, self.1.clone().into_cow())
}
}
/// Read the StatusLine, such as `HTTP/1.1 200 OK`.
///
/// > The first line of a response message is the status-line, consisting
/// > of the protocol version, a space (SP), the status code, another
/// > space, a possibly empty textual phrase describing the status code,
/// > and ending with CRLF.
/// >
/// >```notrust
/// > status-line = HTTP-version SP status-code SP reason-phrase CRLF
/// > status-code = 3DIGIT
/// > reason-phrase = *( HTAB / SP / VCHAR / obs-text )
/// >```
pub fn read_status_line<R: Reader>(stream: &mut R) -> HttpResult<StatusLine> {
let version = try!(read_http_version(stream));
if try!(stream.read_byte()) != SP {
return Err(HttpVersionError);
}
let code = try!(read_status(stream));
Ok((version, code))
}
/// Read the StatusCode from a stream.
pub fn read_status<R: Reader>(stream: &mut R) -> HttpResult<RawStatus> {
let code = [
try!(stream.read_byte()),
try!(stream.read_byte()),
try!(stream.read_byte()),
];
let code = match str::from_utf8(code.as_slice()).ok().and_then(FromStr::from_str) {
Some(num) => num,
None => return Err(HttpStatusError)
};
match try!(stream.read_byte()) {
b' ' => (),
_ => return Err(HttpStatusError)
}
let mut buf = [b' '; 32];
{
let mut bufwrt = BufWriter::new(&mut buf);
'read: loop {
match try!(stream.read_byte()) {
CR => match try!(stream.read_byte()) {
LF => break,
_ => return Err(HttpStatusError)
},
b => match bufwrt.write_u8(b) {
Ok(_) => (),
Err(_) => {
for _ in 0u8..128 {
match try!(stream.read_byte()) {
CR => match try!(stream.read_byte()) {
LF => break 'read,
_ => return Err(HttpStatusError)
},
_ => { /* ignore */ }
}
}
return Err(HttpStatusError)
}
}
}
}
}
let reason = match str::from_utf8(&buf[]) {
Ok(s) => s.trim(),
Err(_) => return Err(HttpStatusError)
};
let reason = match from_u16::<StatusCode>(code) {
Some(status) => match status.canonical_reason() {
Some(phrase) => {
if phrase == reason {
Borrowed(phrase)
} else {
Owned(reason.to_string())
}
}
_ => Owned(reason.to_string())
},
None => return Err(HttpStatusError)
};
Ok(RawStatus(code, reason))
}
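// Example: "404 Not Found\r\n" parses to RawStatus(404, Borrowed("Not Found"))
// because the phrase matches the canonical reason, whereas "200 crazy pants\r\n"
// produces an Owned("crazy pants") phrase.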
#[inline]
fn expect(r: IoResult<u8>, expected: u8) -> HttpResult<()> {
match r {
Ok(b) if b == expected => Ok(()),
Ok(_) => Err(HttpVersionError),
Err(e) => Err(HttpIoError(e))
}
}
#[cfg(test)]
mod tests {
use std::old_io::{self, MemReader, MemWriter, IoResult};
use std::borrow::Cow::{Borrowed, Owned};
use test::Bencher;
use uri::RequestUri;
use uri::RequestUri::{Star, AbsoluteUri, AbsolutePath, Authority};
use method;
use version::HttpVersion;
use version::HttpVersion::{Http10, Http11, Http20};
use HttpError::{HttpVersionError, HttpMethodError};
use HttpResult;
use url::Url;
use super::{read_method, read_uri, read_http_version, read_header,
RawHeaderLine, read_status, RawStatus, read_chunk_size};
fn mem(s: &str) -> MemReader {
MemReader::new(s.as_bytes().to_vec())<|fim▁hole|> #[test]
fn test_read_method() {
fn read(s: &str, result: HttpResult<method::Method>) {
assert_eq!(read_method(&mut mem(s)), result);
}
read("GET /", Ok(method::Method::Get));
read("POST /", Ok(method::Method::Post));
read("PUT /", Ok(method::Method::Put));
read("HEAD /", Ok(method::Method::Head));
read("OPTIONS /", Ok(method::Method::Options));
read("CONNECT /", Ok(method::Method::Connect));
read("TRACE /", Ok(method::Method::Trace));
read("PATCH /", Ok(method::Method::Patch));
read("FOO /", Ok(method::Method::Extension("FOO".to_string())));
read("akemi!~#HOMURA /", Ok(method::Method::Extension("akemi!~#HOMURA".to_string())));
read(" ", Err(HttpMethodError));
}
#[test]
fn test_read_uri() {
fn read(s: &str, result: HttpResult<RequestUri>) {
assert_eq!(read_uri(&mut mem(s)), result);
}
read("* ", Ok(Star));
read("http://hyper.rs/ ", Ok(AbsoluteUri(Url::parse("http://hyper.rs/").unwrap())));
read("hyper.rs ", Ok(Authority("hyper.rs".to_string())));
read("/ ", Ok(AbsolutePath("/".to_string())));
}
#[test]
fn test_read_http_version() {
fn read(s: &str, result: HttpResult<HttpVersion>) {
assert_eq!(read_http_version(&mut mem(s)), result);
}
read("HTTP/1.0", Ok(Http10));
read("HTTP/1.1", Ok(Http11));
read("HTTP/2.0", Ok(Http20));
read("HTP/2.0", Err(HttpVersionError));
read("HTTP.2.0", Err(HttpVersionError));
read("HTTP 2.0", Err(HttpVersionError));
read("TTP 2.0", Err(HttpVersionError));
}
#[test]
fn test_read_status() {
fn read(s: &str, result: HttpResult<RawStatus>) {
assert_eq!(read_status(&mut mem(s)), result);
}
fn read_ignore_string(s: &str, result: HttpResult<RawStatus>) {
match (read_status(&mut mem(s)), result) {
(Ok(RawStatus(ref c1, _)), Ok(RawStatus(ref c2, _))) => {
assert_eq!(c1, c2);
},
(r1, r2) => assert_eq!(r1, r2)
}
}
read("200 OK\r\n", Ok(RawStatus(200, Borrowed("OK"))));
read("404 Not Found\r\n", Ok(RawStatus(404, Borrowed("Not Found"))));
read("200 crazy pants\r\n", Ok(RawStatus(200, Owned("crazy pants".to_string()))));
read("301 Moved Permanently\r\n", Ok(RawStatus(301, Owned("Moved Permanently".to_string()))));
read_ignore_string("301 Unreasonably long header that should not happen, \
but some men just want to watch the world burn\r\n",
Ok(RawStatus(301, Owned("Ignored".to_string()))));
}
#[test]
fn test_read_header() {
fn read(s: &str, result: HttpResult<Option<RawHeaderLine>>) {
assert_eq!(read_header(&mut mem(s)), result);
}
read("Host: rust-lang.org\r\n", Ok(Some(("Host".to_string(),
b"rust-lang.org".to_vec()))));
}
#[test]
fn test_write_chunked() {
use std::str::from_utf8;
let mut w = super::HttpWriter::ChunkedWriter(MemWriter::new());
w.write_all(b"foo bar").unwrap();
w.write_all(b"baz quux herp").unwrap();
let buf = w.end().unwrap().into_inner();
let s = from_utf8(buf.as_slice()).unwrap();
assert_eq!(s, "7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n");
}
#[test]
fn test_write_sized() {
use std::str::from_utf8;
let mut w = super::HttpWriter::SizedWriter(MemWriter::new(), 8);
w.write_all(b"foo bar").unwrap();
assert_eq!(w.write_all(b"baz"), Err(old_io::standard_error(old_io::ShortWrite(1))));
let buf = w.end().unwrap().into_inner();
let s = from_utf8(buf.as_slice()).unwrap();
assert_eq!(s, "foo barb");
}
#[test]
fn test_read_chunk_size() {
fn read(s: &str, result: IoResult<u64>) {
assert_eq!(read_chunk_size(&mut mem(s)), result);
}
read("1\r\n", Ok(1));
read("01\r\n", Ok(1));
read("0\r\n", Ok(0));
read("00\r\n", Ok(0));
read("A\r\n", Ok(10));
read("a\r\n", Ok(10));
read("Ff\r\n", Ok(255));
read("Ff \r\n", Ok(255));
// Missing LF or CRLF
read("F\rF", Err(old_io::standard_error(old_io::InvalidInput)));
read("F", Err(old_io::standard_error(old_io::EndOfFile)));
// Invalid hex digit
read("X\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
read("1X\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
read("-\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
read("-1\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
// Acceptable (if not fully valid) extensions do not influence the size
read("1;extension\r\n", Ok(1));
read("a;ext name=value\r\n", Ok(10));
read("1;extension;extension2\r\n", Ok(1));
read("1;;; ;\r\n", Ok(1));
read("2; extension...\r\n", Ok(2));
read("3 ; extension=123\r\n", Ok(3));
read("3 ;\r\n", Ok(3));
read("3 ; \r\n", Ok(3));
// Invalid extensions cause an error
read("1 invalid extension\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
read("1 A\r\n", Err(old_io::standard_error(old_io::InvalidInput)));
read("1;no CRLF", Err(old_io::standard_error(old_io::EndOfFile)));
}
#[bench]
fn bench_read_method(b: &mut Bencher) {
b.bytes = b"CONNECT ".len() as u64;
b.iter(|| assert_eq!(read_method(&mut mem("CONNECT ")), Ok(method::Method::Connect)));
}
#[bench]
fn bench_read_status(b: &mut Bencher) {
b.bytes = b"404 Not Found\r\n".len() as u64;
b.iter(|| assert_eq!(read_status(&mut mem("404 Not Found\r\n")), Ok(RawStatus(404, Borrowed("Not Found")))));
}
}<|fim▁end|> | }
|
<|file_name|>parse_fasta.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import string, copy
<|fim▁hole|>import sys
def read_fasta(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key of a given query ID only contains its ID, not the full header
@return {header: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
def read_fasta_pdb(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key = PDB accession
@return {PDB-acc: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:].split()[0]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
if __name__ == "__main__":
afile = open(sys.argv[1], 'r')
if len(sys.argv) == 3:
query_id = sys.argv[2]
else:
query_id = ''
seq_dict = read_fasta(afile, query_id)
afile.close()
print 'There are %d entries with unique headers in your file.' % len(seq_dict)<|fim▁end|> | |
<|file_name|>lda.py<|end_file_name|><|fim▁begin|>from mnist import MNIST
import numpy as np
from thirdparty import log_mvnpdf, log_mvnpdf_diag
data = MNIST('./data/mnist')
data.load_training()
data.load_testing()
train = np.array(data.train_images)/255.0
test = np.array(np.array(data.test_images)/255.0)
dataset = {i: [] for i in range(10) }
for it, x in enumerate(data.train_labels):
dataset[x].append(train[it])
# Estimate per-class Gaussian parameters (mean vector and full covariance matrix).
mu = []
cov = []
for k in dataset:
    X = np.array(dataset[k])
    mu.append(np.mean(X, axis=0))  # per-pixel mean vector (np.average over the whole array would collapse to a scalar)
    cov.append(np.cov(X.T))
es = log_mvnpdf(train, np.array(mu), np.array(cov))
results = {i: [] for i in range(10) }
for it,e in enumerate(es):<|fim▁hole|><|fim▁end|> | results[np.argmax(e)].append(data.train_labels[it])
print(results) |
<|file_name|>sysdiag.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
""" sysdiag
Pierre Haessig — September 2013
"""
from __future__ import division, print_function
<|fim▁hole|> '''
base = str(base).strip()
if base == '':
# avoid having '' as name (although it would not break the code...)
raise ValueError('base name should not be empty!')
if base not in name_list:
return base
# Else: build another name by counting
i = 0
name = base + str(i)
while name in name_list:
i += 1
name = base + str(i)
return name
class System(object):
'''Diagram description of a system
    a System is either an interconnection of subsystems
or an atomic element (a leaf of the tree)
'''
def __init__(self, name='root', parent=None):
self.name = name
# Parent system, if any (None for top-level):
self.parent = None
# Children systems, if any (None for leaf-level):
self.subsystems = []
self.wires = []
self.ports = []
self.params = {}
# If a parent system is provided, request its addition as a subsystem
if parent is not None:
parent.add_subsystem(self)
#end __init__()
def is_empty(self):
'''True if the System contains no subsystems and no wires'''
return (not self.subsystems) and (not self.wires)
@property
def ports_dict(self):
'''dict of ports, which keys are the names of the ports'''
return {p.name:p for p in self.ports}
@property
def subsystems_dict(self):
'''dict of subsystems, which keys are the names of the systems'''
return {s.name:s for s in self.subsystems}
def add_port(self, port, created_by_system = False):
'''add a Port to the System'''
if port in self.ports:
raise ValueError('port already added!')
# extract the port's name
name = port.name
port_names = [p.name for p in self.ports]
if name in port_names:
raise ValueError("port name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
port.system = self
port._created_by_system = bool(created_by_system)
self.ports.append(port)
def del_port(self, port):
'''delete a Port of the System (and disconnect any connected wire)
'''
if (port.wire is not None) or (port.internal_wire is not None):
# TODO : implement the wire disconnection
raise NotImplementedError('Cannot yet delete a connected Port')
# Remove the ports list:
self.ports.remove(port)
def add_subsystem(self, subsys):
# 1) Check name uniqueness
name = subsys.name
subsys_names = [s.name for s in self.subsystems]
if name in subsys_names:
raise ValueError("system name '{}' already exists in {:s}!".format(
name, repr(self))
)
# 2) Add parent relationship and add to the system list
subsys.parent = self
self.subsystems.append(subsys)
def add_wire(self, wire):
# 1) Check name uniqueness
name = wire.name
wire_names = [w.name for w in self.wires]
if name in wire_names:
raise ValueError("wire name '{}' already exists in {:s}!".format(
name, repr(self))
)
# Add parent relationship and add to the ports dict:
wire.parent = self
self.wires.append(wire)
def create_name(self, category, base):
'''Returns a name (str) built on `base` that doesn't exist in
within the names of `category`.
'''
if category == 'subsystem':
components = self.subsystems
elif category == 'wire':
components = self.wires
else:
raise ValueError("Unknown category '{}'!".format(str(category)))
name_list = [c.name for c in components]
return _create_name(name_list, base)
def __repr__(self):
cls_name = self.__class__.__name__
s = "{:s}('{.name}')".format(cls_name, self)
return s
def __str__(self):
s = repr(self)
if self.parent:
s += '\n Parent: {:s}'.format(repr(self.parent))
if self.params:
s += '\n Parameters: {:s}'.format(str(self.params))
if self.ports:
s += '\n Ports: {:s}'.format(str(self.ports))
if self.subsystems:
s += '\n Subsytems: {:s}'.format(str(self.subsystems))
return s
def __eq__(self, other):
'''Systems compare equal if their class, `name` and `params` are equal.
and also their lists of ports and wires are *similar*
(see `_is_similar` methods of Port and Wire)
and finally their subsystems recursively compare equal.
parent systems are not compared (would generate infinite recursion).
'''
if not isinstance(other, System):
return NotImplemented
# Basic similarity
basic_sim = self.__class__ == other.__class__ and \
self.name == other.name and \
self.params == other.params
if not basic_sim:
return False
# Port similarity: (sensitive to the order)
ports_sim = all(p1._is_similar(p2) for (p1,p2)
in zip(self.ports, other.ports))
if not ports_sim:
return False
# Wires similarity
wires_sim = all(w1._is_similar(w2) for (w1,w2)
in zip(self.wires, other.wires))
if not wires_sim:
return False
print('equality at level {} is true'.format(self.name))
# Since everything matches, compare subsystems:
return self.subsystems == other.subsystems
# end __eq__()
def __ne__(self,other):
return not (self==other)
def _to_json(self):
'''convert the System instance to a JSON-serializable object
System is serialized with list of ports, subsystems and wires
but without connectivity information (e.g. no parent information)
ports created at the initialization of the system ("default ports")
are not serialized.
'''
# Filter out ports created at the initialization of the system
ports_list = [p for p in self.ports if not p._created_by_system]
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'System',
'__class__': cls_name,
'name':self.name,
'subsystems':self.subsystems,
'wires':self.wires,
'ports':ports_list,
'params':self.params
}
# end _to_json
def json_dump(self, output=None, indent=2, sort_keys=True):
'''dump (e.g. save) the System structure in json format
if `output` is None: return a json string
if `output` is a writable file: write in this file
'''
import json
if output is None:
return json.dumps(self, default=to_json, indent=indent, sort_keys=sort_keys)
else:
json.dump(self, output, default=to_json, indent=indent, sort_keys=sort_keys)
return
# end json_dump
class Port(object):
'''Port enables the connection of a System to a Wire
Each port has a `type` which only allows the connection of a Wire
of the same type.
it also have a `direction` ('none', 'in', 'out') that is set
at the class level
private attribute `_created_by_system` tells whether the port was created
automatically by the system's class at initialization or by a custom code
(if True, the port is not serialized by its system).
'''
direction = 'none'
def __init__(self, name, ptype):
self.name = name
self.type = ptype
self.system = None
self.wire = None
self.internal_wire = None
self._created_by_system = False
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def __str__(self):
s = repr(self) + ' of ' + repr(self.system)
return s
def _is_similar(self, other):
'''Ports are *similar* if their class, `type` and `name` are equal.
(their parent system are not compared)
'''
if not isinstance(other, Port):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name
def _to_json(self):
'''convert the Port instance to a JSON-serializable object
Ports are serialized without any connectivity information
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Port',
'__class__': cls_name,
'name':self.name,
'type':self.type
}
# end _to_json
class InputPort(Port):
'''Input Port'''
direction = 'in'
def __init__(self, name, ptype=''):
super(InputPort, self).__init__(name, ptype)
class OutputPort(Port):
'''Output Port'''
direction = 'out'
def __init__(self, name, ptype=''):
super(OutputPort, self).__init__(name, ptype)
class Wire(object):
'''Wire enables the interconnection of several Systems
through their Ports'''
def __init__(self, name, wtype, parent=None):
self.name = name
self.parent = None
self.type = wtype
self.ports = []
# If a parent system is provided, request its addition as a wire
if parent is not None:
parent.add_wire(self)
def is_connect_allowed(self, port, port_level, raise_error=False):
'''Check that a connection between Wire ̀ self` and a Port `port` is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
assert port_level in ['sibling', 'parent']
# Port availability (is there already a wire connected?):
if port_level == 'sibling':
connected_wire = port.wire
elif port_level == 'parent':
connected_wire = port.internal_wire
if connected_wire is not None:
if raise_error:
raise ValueError('port is already connected to '+\
'{:s}!'.format(repr(connected_wire)))
else:
return False
# Check parent relationship:
if port_level == 'sibling':
# Check that the wire and port.system are siblings:
if self.parent is not port.system.parent:
if raise_error:
raise ValueError('wire and port.system should have a common parent!')
else:
return False
elif port_level == 'parent':
# Check that the port.system is the parent of the wire:
if self.parent is not port.system:
if raise_error:
raise ValueError('port.system should be the parent of the wire!')
else:
return False
# Wire-Port Type checking:
if self.type == '':
# untyped wire: connection is always possible
return True
elif port.type == self.type:
return True
else:
# Incompatible types
if raise_error:
raise TypeError("Wire type '{:s}'".format(str(self.type)) + \
" and Port type '{:s}'".format(str(port.type)) + \
" are not compatible!")
else:
return False
def connect_port(self, port, port_level='sibling'):
'''Connect the Wire to a Port `port`'''
if port in self.ports:
return # Port is aleady connected
# Type checking:
self.is_connect_allowed(port, port_level, raise_error=True)
# Add parent relationship:
assert port_level in ['sibling', 'parent']
if port_level=='sibling':
port.wire = self
elif port_level == 'parent':
port.internal_wire = self
# Book keeping of ports:
self.ports.append(port)
@property
def ports_by_name(self):
'''triplet representation of port connections
(level, port.system.name, port.name)
(used for serialization)
'''
def port_triplet(p):
'''triplet representation (level, port.system.name, port.name)'''
if p.system is self.parent:
level = 'parent'
elif p.system.parent is self.parent:
level = 'sibling'
else:
raise ValueError('The system of Port {}'.format(repr(p)) +\
'is neither a parent nor a sibling!')
return (level, p.system.name, p.name)
return [port_triplet(p) for p in self.ports]
def connect_by_name(self, s_name, p_name, level='sibling'):
'''Connects the ports named `p_name` of system named `s_name`
to be found at level `level` ('parent' or 'sibling' (default))
'''
# TODO (?) merge the notion of level in the name (make parent a reserved name)
assert level in ['sibling', 'parent']
# 1) find the system:
if level == 'parent':
syst = self.parent
assert self.parent.name == s_name
elif level == 'sibling':
syst = self.parent.subsystems_dict[s_name]
port = syst.ports_dict[p_name]
self.connect_port(port, level)
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def _is_similar(self, other):
'''Wires are *similar* if their class, `type` and `name` are equal
and if their connectivity (`ports_by_name`) is the same
(their parent system are not compared)
'''
if not isinstance(other, Wire):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name and \
self.ports_by_name == other.ports_by_name
def _to_json(self):
'''convert the Wire instance to a JSON-serializable object
Wires are serialized with the port connectivity in tuples
(but parent relationship is not serialized)
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Wire',
'__class__': cls_name,
'name': self.name,
'type': self.type,
'ports': self.ports_by_name
}
# end _to_json
class SignalWire(Wire):
'''Signal Wire for the interconnection of several Systems
through their Input and Output Ports.
Each SignalWire can be connected to a unique Output Port (signal source)
and several Input Ports (signal sinks)
'''
def __init__(self, name, wtype='', parent=None):
super(SignalWire, self).__init__(name, wtype, parent)
def is_connect_allowed(self, port, port_level, raise_error=False):
'''Check that a connection between SignalWire ̀ self` and a Port `port`
is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
if port.direction not in ['in', 'out']:
if raise_error:
raise TypeError('Only Input/Output Port can be connected!')
else:
return False
def is_output(port, level):
'''an output port is either:
* a sibling system'port with direction == 'out' or
* a parent system'port with direction == 'in'
'''
if level=='detect':
wire = self
if wire.parent == port.system:
level = 'parent'
elif wire.parent == port.system.parent:
level = 'sibling'
else:
raise ValueError('Port is neither sibling nor parent')
is_out = (level=='sibling' and port.direction == 'out') or \
(level=='parent' and port.direction == 'in')
return is_out
# Now we have an I/O Port for sure:
if is_output(port, port_level):
# check that there is not already a signal source
other_ports = [p for p in self.ports if (is_output(p, 'detect')
and p is not port)]
if other_ports:
if raise_error:
raise ValueError('Only one output port can be connected!')
else:
return False
# Now the I/O aspect is fine. Launch some further checks:
return super(SignalWire, self).is_connect_allowed(port, port_level, raise_error)
def connect_systems(source, dest, s_pname, d_pname, wire_cls=Wire):
'''Connect systems `source` to `dest` using
port names `s_pname` and `d_pname`
with a wire of instance `wire_cls` (defaults to Wire)
The wire is created if necessary
Returns: the wire used for the connection
'''
# 1) find the ports
s_port = source.ports_dict[s_pname]
d_port = dest.ports_dict[d_pname]
    # 2) find a pre-existing wire:
w = None
if s_port.wire is not None:
w = s_port.wire
elif d_port.wire is not None:
w = d_port.wire
else:
parent = s_port.system.parent
wname = parent.create_name('wire','W')
wtype = s_port.type
w = wire_cls(wname, wtype, parent)
# 3) Make the connection:
w.connect_port(s_port)
w.connect_port(d_port)
return w
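# Illustrative usage (all names are made up): connect the 'out' port of one block
# to the 'in' port of a sibling block under a common parent System.
#
#     root = System('root')
#     src = System('src', parent=root); src.add_port(OutputPort('out'))
#     snk = System('snk', parent=root); snk.add_port(InputPort('in'))
#     w = connect_systems(src, snk, 'out', 'in', wire_cls=SignalWire)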
def to_json(py_obj):
'''convert `py_obj` to JSON-serializable objects
`py_obj` should be an instance of `System`, `Wire` or `Port`
'''
if isinstance(py_obj, System):
return py_obj._to_json()
if isinstance(py_obj, Wire):
return py_obj._to_json()
if isinstance(py_obj, Port):
return py_obj._to_json()
raise TypeError(repr(py_obj) + ' is not JSON serializable')
# end to_json
import sys
def _str_to_class(mod_class):
    '''retrieves the class from a "module.class" string'''
mod_name, cls_name = mod_class.split('.')
mod = sys.modules[mod_name]
return getattr(mod, cls_name)
def from_json(json_object):
'''deserializes a sysdiag json object'''
if '__sysdiagclass__' in json_object:
cls = _str_to_class(json_object['__class__'])
if json_object['__sysdiagclass__'] == 'Port':
port = cls(name = json_object['name'], ptype = json_object['type'])
return port
if json_object['__sysdiagclass__'] == 'System':
# TODO: specialize the instanciation for each class using
# _from_json class methods
syst = cls(name = json_object['name'])
syst.params = json_object['params']
# add ports if any:
for p in json_object['ports']:
syst.add_port(p)
# add subsystems
for s in json_object['subsystems']:
syst.add_subsystem(s)
# add wires
for w_dict in json_object['wires']:
# 1) decode the wire:
w_cls = _str_to_class(w_dict['__class__'])
w = w_cls(name = w_dict['name'], wtype = w_dict['type'])
syst.add_wire(w)
# make the connections:
for level, s_name, p_name in w_dict['ports']:
w.connect_by_name(s_name, p_name, level)
# end for each wire
return syst
return json_object
def json_load(json_dump):
import json
syst = json.loads(json_dump, object_hook=from_json)
return syst<|fim▁end|> | def _create_name(name_list, base):
'''Returns a name (str) built on `base` that doesn't exist in `name_list`.
Useful for automatic creation of subsystems or wires |
<|file_name|>misc.rs<|end_file_name|><|fim▁begin|>//! Miscellaneous type-system utilities that are too small to deserve their own modules.<|fim▁hole|>use crate::infer::InferCtxtExt as _;
use crate::traits::{self, ObligationCause};
use rustc_hir as hir;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use crate::traits::error_reporting::InferCtxtExt;
#[derive(Clone)]
pub enum CopyImplementationError<'tcx> {
InfrigingFields(Vec<&'tcx ty::FieldDef>),
NotAnAdt,
HasDestructor,
}
pub fn can_type_implement_copy(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
self_type: Ty<'tcx>,
) -> Result<(), CopyImplementationError<'tcx>> {
// FIXME: (@jroesch) float this code up
tcx.infer_ctxt().enter(|infcx| {
let (adt, substs) = match self_type.kind() {
// These types used to have a builtin impl.
// Now libcore provides that impl.
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::Char
| ty::RawPtr(..)
| ty::Never
| ty::Ref(_, _, hir::Mutability::Not)
| ty::Array(..) => return Ok(()),
ty::Adt(adt, substs) => (adt, substs),
_ => return Err(CopyImplementationError::NotAnAdt),
};
let mut infringing = Vec::new();
for variant in &adt.variants {
for field in &variant.fields {
let ty = field.ty(tcx, substs);
if ty.references_error() {
continue;
}
let span = tcx.def_span(field.did);
let cause = ObligationCause::dummy_with_span(span);
let ctx = traits::FulfillmentContext::new();
match traits::fully_normalize(&infcx, ctx, cause, param_env, ty) {
Ok(ty) => {
if !infcx.type_is_copy_modulo_regions(param_env, ty, span) {
infringing.push(field);
}
}
Err(errors) => {
infcx.report_fulfillment_errors(&errors, None, false);
}
};
}
}
if !infringing.is_empty() {
return Err(CopyImplementationError::InfrigingFields(infringing));
}
if adt.has_dtor(tcx) {
return Err(CopyImplementationError::HasDestructor);
}
Ok(())
})
}<|fim▁end|> | |
<|file_name|>windowManager.ts<|end_file_name|><|fim▁begin|>import { app, BrowserWindow } from 'electron';
import path from 'path';
import createLogger from './functions/createLogger';
import directoryPaths from './electronDirectoryPaths';
const viewDirectoryPath = directoryPaths.views;
const logger = createLogger('electron/windowManager.ts');
export default new class {
/*<|fim▁hole|> public mainWindowCloseConfirmed = false;
    public aboutWindow?: BrowserWindow = undefined;
public mainWindow?: BrowserWindow = undefined;
createAboutWindow(parent?: BrowserWindow) {
logger.verbose('about window created');
this.aboutWindow = new BrowserWindow({
parent,
width: 380,
height: 290,
resizable: false,
movable: false,
center: true,
frame: false,
modal: true,
show: false,
webPreferences: {
nodeIntegration: true,
preload: path.join(viewDirectoryPath, '..', 'preload', 'preload.bundle.js'),
},
});
this.aboutWindow.loadURL(`file:///${path.resolve(viewDirectoryPath, 'about.html')}`);
this.aboutWindow.on('closed', () => {
this.aboutWindow = undefined;
});
}
createMainWindow({ debug }: { debug: boolean }) {
const language = app.getLocale();
const title = language === 'ko' ? '엔트리 하드웨어 v' : 'Entry Hardware v';
const { hardwareVersion } = global.sharedObject;
this.mainWindow = new BrowserWindow({
width: 800,
height: 670,
minWidth: 420,
title: title + hardwareVersion,
webPreferences: {
backgroundThrottling: false,
nodeIntegration: false,
preload: path.join(viewDirectoryPath, '..', 'preload', 'preload.bundle.js'),
},
});
this.mainWindow.loadURL(`file:///${path.resolve(viewDirectoryPath, 'index.html')}`);
if (debug) {
this.mainWindow.webContents.openDevTools();
}
this.mainWindow.setMenu(null);
this.mainWindow.on('close', (e) => {
if (!this.mainWindowCloseConfirmed) {
e.preventDefault();
logger.verbose('EntryHW close rejected. confirm connection close');
this.mainWindow?.webContents.send('hardwareCloseConfirm');
}
});
this.mainWindow.on('closed', () => {
this.mainWindow = undefined;
});
logger.verbose(`main window created. title: ${title + hardwareVersion}`);
}
}();<|fim▁end|> | The hardware main window is designed not to close while a hardware connection is active.
    So when the native close event fires, we first ask the renderer to confirm,
    and once the flag is set from its answer we call close again to actually quit.
*/ |
<|file_name|>test_autocomplete.py<|end_file_name|><|fim▁begin|># encoding: utf-8
from collections import namedtuple
import inspect
import keyword
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import jedi
has_jedi = True
except ImportError:
has_jedi = False
from bpython import autocomplete
from bpython._py3compat import py3
from bpython.test import mock
is_py34 = sys.version_info[:2] >= (3, 4)
if is_py34:
glob_function = 'glob.iglob'
else:
glob_function = 'glob.glob'
class TestSafeEval(unittest.TestCase):
def test_catches_syntax_error(self):
with self.assertRaises(autocomplete.EvaluationError):
autocomplete.safe_eval('1re', {})
class TestFormatters(unittest.TestCase):
def test_filename(self):
completer = autocomplete.FilenameCompletion()
last_part_of_filename = completer.format
self.assertEqual(last_part_of_filename('abc'), 'abc')
self.assertEqual(last_part_of_filename('abc/'), 'abc/')
self.assertEqual(last_part_of_filename('abc/efg'), 'efg')
self.assertEqual(last_part_of_filename('abc/efg/'), 'efg/')
self.assertEqual(last_part_of_filename('/abc'), 'abc')
self.assertEqual(last_part_of_filename('ab.c/e.f.g/'), 'e.f.g/')
def test_attribute(self):
self.assertEqual(autocomplete.after_last_dot('abc.edf'), 'edf')
def completer(matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
class TestGetCompleter(unittest.TestCase):
def test_no_completers(self):
self.assertTupleEqual(autocomplete.get_completer([], 0, ''),
([], None))
def test_one_completer_without_matches_returns_empty_list_and_none(self):
a = completer([])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
([], None))
def test_one_completer_returns_matches_and_completer(self):
a = completer(['a'])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
(['a'], a))
def test_two_completers_with_matches_returns_first_matches(self):
a = completer(['a'])
b = completer(['b'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], a))
def test_first_non_none_completer_matches_are_returned(self):
a = completer([])
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), ([], None))
def test_only_completer_returns_None(self):
a = completer(None)
self.assertEqual(autocomplete.get_completer([a], 0, ''), ([], None))
def test_first_completer_returns_None(self):
a = completer(None)
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], b))
class TestCumulativeCompleter(unittest.TestCase):
    def completer(self, matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
def test_no_completers_fails(self):
with self.assertRaises(ValueError):
autocomplete.CumulativeCompleter([])
def test_one_empty_completer_returns_empty(self):
a = self.completer([])
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), set())
def test_one_none_completer_returns_none(self):
a = self.completer(None)
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), None)
def test_two_completers_get_both(self):
a = self.completer(['a'])
b = self.completer(['b'])
cumulative = autocomplete.CumulativeCompleter([a, b])
self.assertEqual(cumulative.matches(3, 'abc'), set(['a', 'b']))
class TestFilenameCompletion(unittest.TestCase):
def setUp(self):
self.completer = autocomplete.FilenameCompletion()
def test_locate_fails_when_not_in_string(self):
self.assertEqual(self.completer.locate(4, "abcd"), None)
def test_locate_succeeds_when_in_string(self):
self.assertEqual(self.completer.locate(4, "a'bc'd"), (2, 4, 'bc'))
def test_issue_491(self):
self.assertNotEqual(self.completer.matches(9, '"a[a.l-1]'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_none_if_not_in_string(self):
self.assertEqual(self.completer.matches(2, 'abcd'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_empty_list_when_no_files(self):
self.assertEqual(self.completer.matches(2, '"a'), set())
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_match_returns_files_when_files_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa', 'abcde'])
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: True)
@mock.patch('os.path.sep', new='/')
def test_match_returns_dirs_when_dirs_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa/', 'abcde/'])
@mock.patch(glob_function,
new=lambda text: ['/expand/ed/abcde', '/expand/ed/aaaaa'])
@mock.patch('os.path.expanduser',
new=lambda text: text.replace('~', '/expand/ed'))
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_tilde_stays_pretty(self):
self.assertEqual(sorted(self.completer.matches(4, '"~/a')),
['~/aaaaa', '~/abcde'])
@mock.patch('os.path.sep', new='/')
def test_formatting_takes_just_last_part(self):
self.assertEqual(self.completer.format('/hello/there/'), 'there/')
self.assertEqual(self.completer.format('/hello/there'), 'there')
class MockNumPy(object):
"""This is a mock numpy object that raises an error when there is an atempt
to convert it to a boolean."""
<|fim▁hole|>
class TestDictKeyCompletion(unittest.TestCase):
def test_set_of_keys_returned_when_matches_found(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertSetEqual(com.matches(2, "d[", locals_=local),
set(["'ab']", "'cd']"]))
def test_none_returned_when_eval_error(self):
com = autocomplete.DictKeyCompletion()
local = {'e': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(2, "d[", locals_=local), None)
def test_none_returned_when_not_dict_type(self):
com = autocomplete.DictKeyCompletion()
local = {'l': ["ab", "cd"]}
self.assertEqual(com.matches(2, "l[", locals_=local), None)
def test_none_returned_when_no_matches_left(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(3, "d[r", locals_=local), None)
def test_obj_that_does_not_allow_conversion_to_bool(self):
com = autocomplete.DictKeyCompletion()
local = {'mNumPy': MockNumPy()}
self.assertEqual(com.matches(7, "mNumPy[", locals_=local), None)
class Foo(object):
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
class OldStyleFoo:
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
skip_old_style = unittest.skipIf(py3,
'In Python 3 there are no old style classes')
class Properties(Foo):
@property
def asserts_when_called(self):
raise AssertionError("getter method called")
class Slots(object):
__slots__ = ['a', 'b']
class TestAttrCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.AttrCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.', locals_={'a': Foo()}),
set(['a.method', 'a.a', 'a.b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.',
locals_={'a': OldStyleFoo()}),
set(['a.method', 'a.a', 'a.b']))
self.assertIn(u'a.__dict__',
self.com.matches(4, 'a.__',
locals_={'a': OldStyleFoo()}))
@skip_old_style
def test_att_matches_found_on_old_style_class_object(self):
self.assertIn(u'A.__dict__',
self.com.matches(4, 'A.__', locals_={'A': OldStyleFoo}))
@skip_old_style
def test_issue536(self):
class OldStyleWithBrokenGetAttr:
def __getattr__(self, attr):
raise Exception()
locals_ = {'a': OldStyleWithBrokenGetAttr()}
self.assertIn(u'a.__module__',
self.com.matches(4, 'a.__', locals_=locals_))
def test_descriptor_attributes_not_run(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'a.', locals_={'a': Properties()}),
set(['a.b', 'a.a', 'a.method',
'a.asserts_when_called']))
def test_slots_not_crash(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'A.', locals_={'A': Slots}),
set(['A.b', 'A.a', 'A.mro']))
class TestExpressionAttributeCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.ExpressionAttributeCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [Foo()]}),
set(['method', 'a', 'b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [OldStyleFoo()]}),
set(['method', 'a', 'b']))
def test_other_getitem_methods_not_called(self):
class FakeList(object):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_tuples_complete(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
@unittest.skip('TODO, subclasses do not complete yet')
def test_list_subclasses_complete(self):
class ListSubclass(list):
pass
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': ListSubclass([Foo()])}),
set(['method', 'a', 'b']))
def test_getitem_not_called_in_list_subclasses_overriding_getitem(self):
class FakeList(list):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_literals_complete(self):
self.assertSetEqual(self.com.matches(10, '[a][0][0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
def test_dictionaries_complete(self):
self.assertSetEqual(self.com.matches(7, 'a["b"].',
locals_={'a': {'b': Foo()}}),
set(['method', 'a', 'b']))
class TestMagicMethodCompletion(unittest.TestCase):
def test_magic_methods_complete_after_double_underscores(self):
com = autocomplete.MagicMethodCompletion()
block = "class Something(object)\n def __"
self.assertSetEqual(com.matches(10, ' def __', current_block=block),
set(autocomplete.MAGIC_METHODS))
Comp = namedtuple('Completion', ['name', 'complete'])
@unittest.skipUnless(has_jedi, "jedi required")
class TestMultilineJediCompletion(unittest.TestCase):
def test_returns_none_with_single_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(2, 'Va', current_block='Va', history=[]),
None)
    def test_returns_none_with_blank_second_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(0, '', current_block='class Foo():\n',
history=['class Foo():']), None)
def matches_from_completions(self, cursor, line, block, history,
completions):
with mock.patch('bpython.autocomplete.jedi.Script') as Script:
script = Script.return_value
script.completions.return_value = completions
com = autocomplete.MultilineJediCompletion()
return com.matches(cursor, line, current_block=block,
history=history)
def test_completions_starting_with_different_letters(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('Cbc', 'bc')])
self.assertEqual(matches, None)
def test_completions_starting_with_different_cases(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('ade', 'de')])
self.assertSetEqual(matches, set(['ade']))
@unittest.skipUnless(is_py34, 'asyncio required')
def test_issue_544(self):
com = autocomplete.MultilineJediCompletion()
code = '@asyncio.coroutine\ndef'
history = ('import asyncio', '@asyncio.coroutin')
com.matches(3, 'def', current_block=code, history=history)
class TestGlobalCompletion(unittest.TestCase):
def setUp(self):
self.com = autocomplete.GlobalCompletion()
def test_function(self):
def function():
pass
self.assertEqual(self.com.matches(8, 'function',
locals_={'function': function}),
set(('function(', )))
def test_completions_are_unicode(self):
for m in self.com.matches(1, 'a', locals_={'abc': 10}):
self.assertIsInstance(m, type(u''))
@unittest.skipIf(py3, "in Python 3 invalid identifiers are passed through")
def test_ignores_nonascii_encodable(self):
self.assertEqual(self.com.matches(3, 'abc', locals_={'abcß': 10}),
None)
def test_mock_kwlist(self):
with mock.patch.object(keyword, 'kwlist', new=['abcd']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
def test_mock_kwlist_non_ascii(self):
with mock.patch.object(keyword, 'kwlist', new=['abcß']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
class TestParameterNameCompletion(unittest.TestCase):
def test_set_of_params_returns_when_matches_found(self):
def func(apple, apricot, banana, carrot):
pass
if py3:
argspec = list(inspect.getfullargspec(func))
else:
argspec = list(inspect.getargspec(func))
argspec = ["func", argspec, False]
com = autocomplete.ParameterNameCompletion()
self.assertSetEqual(com.matches(1, "a", argspec=argspec),
set(['apple=', 'apricot=']))
self.assertSetEqual(com.matches(2, "ba", argspec=argspec),
set(['banana=']))
self.assertSetEqual(com.matches(3, "car", argspec=argspec),
set(['carrot=']))<|fim▁end|> | def __nonzero__(self):
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all()")
|
<|file_name|>training_utils.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import atexit
import collections
from collections import OrderedDict
import functools
import multiprocessing.pool
import threading
import time
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
@six.add_metaclass(abc.ABCMeta)
class Aggregator(object):
"""Abstract base class used to aggregate batch-level outputs of a loop.
Attributes:
use_steps: Whether the loop is using `step` or `batch_size`.
num_samples: Total number of samples: `batch_size * num_batches`.
steps: Total number of steps.
batch_size: Batch size. It is used for validation checks between inputs and
outputs.
results: What to return at the end of the aggregation loop.
"""
def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None):
self.use_steps = use_steps
self.num_samples = num_samples
self.steps = steps
self.batch_size = batch_size
self.results = []
@abc.abstractmethod
def create(self, batch_outs):
"""Creates the initial results from the first batch outputs.
Arguments:
batch_outs: A list of batch-level outputs.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
"""Aggregates batch-level results into total results.
Arguments:
batch_outs: A list of batch-level outputs.
batch_start: The start index of this batch. Always `None` if `use_steps`
is `True`.
batch_end: The end index of this batch. Always `None` if `use_steps` is
`True`.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def finalize(self):
"""Prepares the total results to be returned."""
raise NotImplementedError('Must be implemented in subclasses.')
class MetricsAggregator(Aggregator):
"""Aggregator that calculates loss and metrics info.
Attributes:
use_steps: Whether the loop is using `step` or `batch_size`.
num_samples: Total number of samples: `batch_size*num_batches`.
steps: Total number of steps, ie number of times to iterate over a dataset
to cover all samples.
"""
def __init__(self, use_steps, num_samples=None, steps=None):
super(MetricsAggregator, self).__init__(
use_steps=use_steps,
num_samples=num_samples,
steps=steps,
batch_size=None)
def create(self, batch_outs):
self.results = [0.] * len(batch_outs)
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
# Loss.
if self.use_steps:
self.results[0] += batch_outs[0]
else:
self.results[0] += batch_outs[0] * (batch_end - batch_start)
# Metrics (always stateful, just grab current values.)
self.results[1:] = batch_outs[1:]
def finalize(self):
if not self.results:
raise ValueError('Empty training data.')
self.results[0] /= (self.num_samples or self.steps)
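# Usage sketch (hypothetical values): MetricsAggregator weights the running loss
# by the number of samples in each batch and divides by the total in finalize();
# metrics are stateful, so only the latest metric value is kept.
#
#   agg = MetricsAggregator(use_steps=False, num_samples=48)
#   agg.create([0.5, 0.8])               # [loss, accuracy] from the first batch
#   agg.aggregate([0.5, 0.8], 0, 32)     # loss contribution: 0.5 * 32
#   agg.aggregate([0.2, 0.9], 32, 48)    # loss contribution: 0.2 * 16
#   agg.finalize()
#   # agg.results == [(0.5 * 32 + 0.2 * 16) / 48, 0.9] == [0.4, 0.9]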
class ConcatAggregator(Aggregator):
"""Combine tensor-likes which cannot be merged on the fly.
This class expects to aggregate a single tensor-like rather than a nested
structure of tensor-likes.
"""
def __init__(self, batch_size):
self.composite = None
super(ConcatAggregator, self).__init__(
use_steps=True, num_samples=None, steps=None, batch_size=batch_size)
def create(self, batch_element):
self.composite = composite_tensor_utils.is_composite_or_composite_value(
batch_element)
def aggregate(self, batch_element, batch_start=None, batch_end=None):
# TODO(psv): Add num_samples check here to detect when output batch
# #samples is < batch size and != input batch #samples.
if self.batch_size and self.batch_size < batch_element.shape[0]:
raise ValueError(
'Mismatch between expected batch size and model output batch size. '
'Output shape = {}, expected output shape = shape {}'.format(
batch_element.shape,
(self.batch_size,) + batch_element.shape[1:]))
self.results.append(batch_element)
def finalize(self):
# Special case of single batch inference which skips a copy.
if len(self.results) == 1:
self.results = self.results[0]
elif self.composite:
# TODO(taylorrobie): efficiently concatenate.
results = self.results[0]
for r in self.results[1:]:
results = composite_tensor_utils.append_composite_tensor(results, r)
self.results = results
else:
self.results = np.concatenate(self.results, axis=0)
if isinstance(self.results, ops.EagerTensor):
self.results = self.results._numpy() # pylint: disable=protected-access
<|fim▁hole|>_COPY_THREADS = 4
_COPY_POOL = None
def get_copy_pool():
"""Shared threadpool for copying arrays.
Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
creating a pool per SliceAggregator.
Returns:
The global copy threadpool.
"""
global _COPY_POOL
if _COPY_POOL is None:
_COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
atexit.register(_COPY_POOL.close)
return _COPY_POOL
class SliceAggregator(Aggregator):
"""Combine arrays where the final size is known.
This class expects to aggregate a single tensor-like rather than a nested
structure of tensor-likes.
NumPy copies are an operation that threads handle quite well because all of
  the heavy lifting is in C and does not need the GIL. Moreover, we can perform
lock-free writes to the same buffer in multiple threads because the nature of
result aggregation guarantees that either the indices are disjoint or the
aggregator will throw an exception in finalize. Moreover, because aggregation
is performed on the slowest varying dimension, assignments for a given batch
will write to contiguous blocks of memory, further minimizing contention.
There is, however, some scheduling and context switching overhead which will
offset the gains from pipelining the slice assignment. Below a given threshold
it is faster to simply assign in the main thread rather than enqueue the
assignment in a side thread. The exact threshold will vary from system to
system, but the time is not very sensitive to the exact transition so a value
of 2 ** 14 was chosen which should be reasonable on most systems.
"""
_BINARY_SIZE_THRESHOLD = 2 ** 14
_MAX_COPY_SECONDS = 300
def __init__(self, num_samples, batch_size):
self._async_copies = []
self._pool = get_copy_pool()
self._errors = []
super(SliceAggregator, self).__init__(
use_steps=False,
num_samples=num_samples,
steps=None,
batch_size=batch_size)
def create(self, batch_element):
# This step does not need to be pipelined because NumPy empty array
# initialization is effectively instantaneous.
shape = (self.num_samples,) + batch_element.shape[1:]
dtype = batch_element.dtype
if isinstance(batch_element, ops.EagerTensor):
dtype = dtype.as_numpy_dtype
self.results = np.empty(shape=shape, dtype=dtype)
def aggregate(self, batch_element, batch_start, batch_end):
# Fail early.
if self._errors:
six.reraise(type(self._errors[0]), self._errors[0])
# In the special case of single batch inference, no copy is needed.
if batch_end - batch_start == self.num_samples:
if self.num_samples != batch_element.shape[0]:
raise ValueError(
'Mismatch between expected batch size and model output batch size. '
'Output shape = {}, expected output shape = shape {}'.format(
batch_element.shape, self.results.shape))
self.results = batch_element
return
# This is an approximate threshold, so we don't need to consider the number
# of bytes per element.
num_elements = np.prod(batch_element.shape)
if num_elements < self._BINARY_SIZE_THRESHOLD:
self.results[batch_start:batch_end] = batch_element
else:
is_finished = threading.Event()
self._pool.apply_async(
self._slice_assign,
args=(batch_element, batch_start, batch_end, is_finished))
self._async_copies.append(is_finished)
def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
try:
self.results[batch_start:batch_end] = batch_element
except Exception as e: # pylint: disable=broad-except
# `_slice_assign` should only be called in threads and exceptions raised
# in threads do not carry over to the main thread. So instead we perform a
# a broad catch in the thread and then store the exception to be re-raised
# in the main thread.
self._errors.append(e)
finally:
is_finished.set()
def finalize(self):
start_time = time.time()
for is_finished in self._async_copies:
timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)])
if not is_finished.wait(timeout):
raise ValueError('Timed out waiting for copy to complete.')
if self._errors:
six.reraise(self._errors[0].__class__, self._errors[0])
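# Usage sketch (hypothetical shapes): SliceAggregator writes each batch into a
# pre-allocated buffer; copies below the 2 ** 14 element threshold are performed
# synchronously, larger ones go through the shared copy pool and are joined in
# finalize().
#
#   agg = SliceAggregator(num_samples=1000, batch_size=100)
#   agg.create(np.zeros((100, 4)))            # allocates a (1000, 4) result buffer
#   agg.aggregate(np.ones((100, 4)), 0, 100)  # 400 elements < 2 ** 14: sync copy
#   agg.finalize()                            # waits for any outstanding async copies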
class OutputsAggregator(Aggregator):
"""Aggregator that concatenates outputs."""
_structure = None
def create(self, batch_outs):
# SparseTensorValue is a named tuple which nest will flatten, so we need
# to guard it to properly handle the structure.
self._structure = nest.get_traverse_shallow_structure(
lambda x: not composite_tensor_utils.is_composite_or_composite_value(x),
batch_outs)
batch_outs = nest.flatten_up_to(self._structure, batch_outs)
for batch_element in batch_outs:
if composite_tensor_utils.is_composite_or_composite_value(batch_element):
# If the output is not a ndarray, it will be either a composite tensor
# or a composite tensor's Value object. In either case, we can't
# allocate an array to hold the object - we'll handle it later.
self.results.append(ConcatAggregator(self.batch_size))
elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)):
self.results.append(
(ConcatAggregator(self.batch_size) if self.use_steps else
SliceAggregator(self.num_samples, self.batch_size)))
else:
# This is not a ndarray, a CompositeTensor, or a CompositeTensorValue.
# Fail fast rather than trying to concatenate it.
raise RuntimeError('Attempted to aggregate unsupported object {}.'
.format(batch_element))
self.results[-1].create(batch_element)
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
batch_outs = nest.flatten_up_to(self._structure, batch_outs)
for batch_element, result in zip(batch_outs, self.results):
result.aggregate(batch_element, batch_start, batch_end)
def finalize(self):
for result in self.results:
result.finalize()
self.results = [i.results for i in self.results]
self.results = nest.pack_sequence_as(self._structure, self.results)
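# Usage sketch (hypothetical shapes): OutputsAggregator creates one sub-aggregator
# per flattened model output -- ConcatAggregator for composite tensors or when
# iterating by steps, SliceAggregator otherwise.
#
#   agg = OutputsAggregator(use_steps=False, num_samples=200, batch_size=50)
#   agg.create([np.zeros((50, 10)), np.zeros((50, 1))])
#   # -> two SliceAggregators backing (200, 10) and (200, 1) result buffers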
def get_progbar(model, count_mode, include_metrics=True):
"""Get Progbar."""
if include_metrics:
stateful_metric_names = getattr(model, 'metrics_names', None)
if stateful_metric_names:
stateful_metric_names = stateful_metric_names[1:] # Exclude `loss`
else:
stateful_metric_names = None
return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names)
def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).
Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
Arguments:
arrays: Single array or list of arrays.
indices: List of indices in the array that should be included in the output
batch.
contiguous: Boolean flag indicating whether the indices are contiguous.
Returns:
Slice of data (either single array or list of arrays).
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tensor_util.is_tensor(x) for x in arrays):
if not contiguous:
entries = [[x[i:i + 1] for i in indices] for x in arrays]
slices = [array_ops.concat(x, axis=0) for x in entries]
else:
slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
else:
slices = generic_utils.slice_arrays(arrays, indices)
if converted_to_list:
slices = slices[0]
return slices
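# Usage sketch (hypothetical data): contiguous indices become a single slice,
# non-contiguous ones are gathered row by row and concatenated.
#
#   x = ops.convert_to_tensor_v2(np.arange(10).reshape(5, 2))
#   slice_arrays(x, [1, 2, 3])                    # rows 1..3 as one slice
#   slice_arrays(x, [0, 2, 4], contiguous=False)  # rows 0, 2, 4 via concat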
def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Arguments:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples) before declaring
`_predict_loop` finished. Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`.
"""
if steps is not None and batch_size is not None:
raise ValueError('If ' + steps_name +
' is set, the `batch_size` must be None.')
if check_steps_argument(ins, steps, steps_name):
return None
if hasattr(ins[0], 'shape'):
return int(ins[0].shape[0])
return None # Edge case where ins == [static_learning_phase]
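# Usage sketch (hypothetical data): with in-memory arrays the sample count comes
# from the first array's leading dimension; with steps-based input it is None
# (`some_iterator` below stands for a dataset iterator).
#
#   check_num_samples([np.zeros((32, 3))], batch_size=8)   # -> 32
#   check_num_samples([some_iterator], steps=100)          # -> None (steps-based)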
def standardize_single_array(x, expected_shape=None):
"""Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1."""
if x is None:
return None
if composite_tensor_utils.is_composite_or_composite_value(x):
return x
if isinstance(x, int):
raise ValueError(
'Expected an array data type but received an integer: {}'.format(x))
if (x.shape is not None and len(x.shape) == 1 and
(expected_shape is None or len(expected_shape) != 1)):
if tensor_util.is_tensor(x):
x = array_ops.expand_dims(x, axis=1)
else:
x = np.expand_dims(x, 1)
return x
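# Usage sketch (hypothetical data): rank-1 arrays gain a trailing feature axis
# unless the expected shape itself is rank 1.
#
#   standardize_single_array(np.array([1., 2., 3.])).shape                          # (3, 1)
#   standardize_single_array(np.array([1., 2., 3.]), expected_shape=(None,)).shape  # (3,)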
def standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that the batch axis of the
arrays matches the expected value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
try:
data_len = len(data)
except TypeError:
# For instance if data is `None` or a symbolic Tensor.
data_len = None
if not names:
if data_len and not isinstance(data, dict):
raise ValueError(
'Error when checking model ' + exception_prefix + ': '
'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
try:
data = [
data[x].values
if data[x].__class__.__name__ == 'DataFrame' else data[x]
for x in names
]
except KeyError as e:
raise ValueError('No data provided for "' + e.args[0] + '". Need data '
'for each key in: ' + str(names))
elif isinstance(data, (list, tuple)):
if isinstance(data[0], (list, tuple)):
data = [np.asarray(d) for d in data]
elif len(names) == 1 and isinstance(data[0], (float, int)):
data = [np.asarray(data)]
else:
data = [
x.values if x.__class__.__name__ == 'DataFrame' else x for x in data
]
else:
data = data.values if data.__class__.__name__ == 'DataFrame' else data
data = [data]
if shapes is not None:
data = [
standardize_single_array(x, shape) for (x, shape) in zip(data, shapes)
]
else:
data = [standardize_single_array(x) for x in data]
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError('Error when checking model ' + exception_prefix +
': the list of Numpy arrays that you are passing to '
'your model is not the size the model expected. '
'Expected to see ' + str(len(names)) + ' array(s), ' +
'for inputs ' + str(names) + ' but instead got the '
'following list of ' + str(len(data)) + ' arrays: ' +
str(data)[:200] + '...')
elif len(names) > 1:
raise ValueError('Error when checking model ' + exception_prefix +
': you are passing a list as input to your model, '
'but the model expects a list of ' + str(len(names)) +
' Numpy arrays instead. The list you passed was: ' +
str(data)[:200])
elif len(data) == 1 and not hasattr(data[0], 'shape'):
raise TypeError('Error when checking model ' + exception_prefix +
': data should be a Numpy array, or list/dict of '
'Numpy arrays. Found: ' + str(data)[:200] + '...')
elif len(names) == 1:
data = [np.asarray(data)]
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is not None:
if tensor_util.is_tensor(data[i]):
tensorshape = data[i].shape
if not tensorshape:
continue
data_shape = tuple(tensorshape.as_list())
elif composite_tensor_utils.is_composite_or_composite_value(data[i]):
tensorshape = composite_tensor_utils.get_shape(data[i])
data_shape = tuple(tensorshape.as_list())
else:
data_shape = data[i].shape
shape = shapes[i]
if len(data_shape) != len(shape):
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have ' +
str(len(shape)) + ' dimensions, but got array '
'with shape ' + str(data_shape))
if not check_batch_axis:
data_shape = data_shape[1:]
shape = shape[1:]
for dim, ref_dim in zip(data_shape, shape):
if ref_dim != dim and ref_dim is not None and dim is not None:
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have shape ' +
str(shape) + ' but got array with shape ' +
str(data_shape))
return data
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or (isinstance(x_weight, (list, tuple)) and
len(x_weight) == 0): # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, (list, tuple)):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) + ' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '` '
                       'array per model output.')
return x_weight
if isinstance(x_weight, collections.Mapping):
generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
'should be either a list or a dict. '
'Provided `' + weight_type + '` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight, output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight, output_names,
'sample_weight')
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
check_all_flat=False):
"""Adds 1.0 as sample weights for the outputs for which there is no weight.
Args:
outputs: List of model outputs.
sample_weights: List of sample weight inputs.
sample_weight_modes: List of sample weight modes or None.
check_all_flat: Ensure that inputs are not nested structures. This is not
a free check, so we may not want to run it eagerly every iteration.
Returns:
Tuple of sample weights, one sample weight for every output, and booleans
describing the raw sample weights.
"""
any_sample_weight = sample_weights is not None and any(
w is not None for w in sample_weights)
partial_sample_weight = any_sample_weight and any(
w is None for w in sample_weights)
if not any_sample_weight:
return None, any_sample_weight, partial_sample_weight
if not partial_sample_weight:
return sample_weights, any_sample_weight, partial_sample_weight
if check_all_flat:
nest.assert_same_structure(
list_to_tuple(sample_weights),
list_to_tuple(nest.flatten(sample_weights)))
nest.assert_same_structure(
list_to_tuple(outputs),
list_to_tuple(nest.flatten(outputs)))
if sample_weight_modes is not None:
nest.assert_same_structure(
sample_weight_modes, nest.flatten(sample_weight_modes))
new_sample_weights = []
for i, sw in enumerate(sample_weights):
if sw is None:
as_numpy = isinstance(outputs[i], np.ndarray)
output = outputs[i]
output_shape = output.shape if as_numpy else array_ops.shape(output)
is_temporal = (
sample_weight_modes is not None and
sample_weight_modes[i] == 'temporal')
sw_shape = (output_shape[0],
output_shape[1]) if is_temporal else (output_shape[0],)
new_sample_weights.append(
np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))
else:
new_sample_weights.append(sw)
return (list_to_tuple(new_sample_weights),
any_sample_weight, partial_sample_weight)
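# Usage sketch (hypothetical data): outputs without an explicit weight get an
# all-ones weight of matching batch (or batch x time) shape.
#
#   outputs = [np.zeros((8, 3)), np.zeros((8, 5))]
#   weights, any_sw, partial_sw = handle_partial_sample_weights(
#       outputs, [np.ones(8), None], sample_weight_modes=None)
#   # any_sw and partial_sw are True; weights[1] is an all-ones array of shape (8,)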
def check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Arguments:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def is_tensor_or_composite_tensor(x):
return tensor_util.is_tensor(
x) or composite_tensor_utils.is_composite_or_composite_value(x)
def set_of_lengths(x):
# Returns a set with the variation between
# different shapes, with None => 0
if x is None:
return {}
else:
return set([
y.shape[0]
for y in x
if y is not None and not is_tensor_or_composite_tensor(y)
])
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' +
str([x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' +
str([y.shape for y in targets]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' +
str([w.shape for w in weights]))
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
                     str(list(set_y)[0]) + ' target samples and ' +
                     str(list(set_w)[0]) + ' sample_weight samples.')
def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly. This check
is purely for UX purposes.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_loss_fns = {
losses.mean_squared_error, losses.binary_crossentropy,
losses.categorical_crossentropy
}
key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,
losses.CategoricalCrossentropy)
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if y is None or loss is None or tensor_util.is_tensor(y):
continue
if losses.is_categorical_crossentropy(loss):
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' +
str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)
if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and
(loss.fn in key_loss_fns))):
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
loss_name = loss.name
if loss_name is None:
loss_type = loss.fn if is_loss_wrapper else type(loss)
loss_name = loss_type.__name__
raise ValueError('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss_name + '`. '
'This loss expects targets to have the same shape '
'as the output.')
def collect_per_output_metric_info(metrics,
output_names,
output_shapes,
loss_fns,
is_weighted=False):
"""Maps metric names and functions to model outputs.
Arguments:
metrics: a list or a list of lists or a dict of metric functions.
output_names: a list of the names (strings) of model outputs.
output_shapes: a list of the shapes (strings) of model outputs.
loss_fns: a list of the loss functions corresponding to the model outputs.
is_weighted: Boolean indicating whether the given metrics are weighted.
Returns:
A list (one entry per model output) of dicts.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like: `[{
'acc': binary_accuracy(),
'ce': binary_crossentropy(),
}, {
'acc': binary_accuracy(),
}]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [{} for _ in output_names]
if isinstance(metrics, list):
any_sub_list = any(isinstance(m, list) for m in metrics)
if any_sub_list:
if len(metrics) != len(output_names):
raise ValueError('When passing a list of lists as `metrics`, '
'it should have one entry per model output. '
'The model has ' + str(len(output_names)) +
' outputs, but you passed metrics=' + str(metrics))
# User has provided a list of len = len(outputs).
nested_metrics = [generic_utils.to_list(m) for m in metrics]
else:
# If it is a single list we then apply all metrics to all outputs.
if len(output_names) > 1:
nested_metrics = []
for _ in output_names:
nested_metrics.append(
[metrics_module.clone_metric(m) for m in metrics])
else:
nested_metrics = [metrics]
elif isinstance(metrics, collections.Mapping):
generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)
nested_metrics = []
for name in output_names:
output_metrics = generic_utils.to_list(metrics.get(name, []))
nested_metrics.append(output_metrics)
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
per_output_metrics = []
for i, metrics in enumerate(nested_metrics):
metrics_dict = OrderedDict()
for metric in metrics:
metric_name = get_metric_name(metric, is_weighted)
metric_fn = get_metric_function(
metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])
# If the metric function is not stateful, we create a stateful version.
if not isinstance(metric_fn, metrics_module.Metric):
metric_fn = metrics_module.MeanMetricWrapper(
metric_fn, name=metric_name)
metrics_dict[metric_name] = metric_fn
per_output_metrics.append(metrics_dict)
return per_output_metrics
def batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
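# Usage sketch (hypothetical result): whole batches are permuted while the order
# inside each batch and the trailing remainder are preserved.
#
#   batch_shuffle(np.arange(10), batch_size=4)
#   # e.g. array([4, 5, 6, 7, 0, 1, 2, 3, 8, 9]) -- the leftover [8, 9] stays last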
def standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array. If both `sample_weight` and `class_weight` are provided,
the weights are multiplied.
Arguments:
y: Numpy array or Tensor of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
that we expect 2D weight data that will be applied to the last 2
dimensions of the targets (i.e. we are weighting timesteps, not
samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
# Iterator may return sample_weight as 1-tuple
if isinstance(sample_weight, tuple):
sample_weight = sample_weight[0]
if sample_weight_mode is not None and sample_weight_mode != 'samplewise':
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape {}. In order to '
'use timestep-wise sample weights, you should specify '
'sample_weight_mode="temporal" in compile(); found "{}" '
'instead. If you just mean to use sample-wise weights, '
'make sure your sample_weight array is 1D.'
.format(sample_weight.shape, sample_weight_mode))
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
      raise ValueError('Found a sample_weight with shape ' +
                       str(sample_weight.shape) + '. '
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if (not tensor_util.is_tensor(sample_weight) and
y.shape[:sample_weight.ndim] != sample_weight.shape):
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
# Class weights applied per-sample.
class_sample_weight = None
if isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('`class_weight` not supported for '
'3+ dimensional targets.')
if tensor_util.is_tensor(y):
# Few classes are expected, so densifying is reasonable.
keys = np.array(sorted(class_weight.keys()))
values = np.array([class_weight[i] for i in keys])
weight_vector = np.zeros(np.max(keys) + 1)
weight_vector[:] = np.nan
weight_vector[keys] = values
y_classes = smart_cond.smart_cond(
len(y.shape.as_list()) == 2 and K.shape(y)[1] > 1,
lambda: K.argmax(y, axis=1),
lambda: math_ops.cast(K.reshape(y, (-1,)), dtypes.int64)
)
class_sample_weight = array_ops.gather(weight_vector, y_classes)
gen_array_ops.check_numerics(
class_sample_weight,
'Invalid classes or class weights detected. NaN values indicate that '
'an appropriate class weight could not be determined.')
class_sample_weight = math_ops.cast(class_sample_weight, K.floatx())
if sample_weight is not None:
sample_weight = math_ops.cast(
ops.convert_to_tensor_v2(sample_weight), K.floatx())
else:
y_classes = y
if len(y.shape) == 2:
if y.shape[1] > 1:
y_classes = np.argmax(y, axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
class_sample_weight = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight])
if len(class_sample_weight) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError(
'`class_weight` must contain all classes in the data.'
' The classes %s exist in the data but not in '
'`class_weight`.' % (existing_classes - existing_class_weight))
if class_sample_weight is not None and sample_weight is not None:
# Multiply weights if both are provided.
return class_sample_weight * sample_weight
if sample_weight is not None:
return sample_weight
if class_sample_weight is not None:
return class_sample_weight
return None
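# Usage sketch (hypothetical data): a class_weight dict becomes a per-sample
# weight vector; if sample_weight is also given, the two are multiplied.
#
#   y = np.array([0, 1, 1, 0])
#   standardize_weights(y, class_weight={0: 1.0, 1: 2.0})   # -> array([1., 2., 2., 1.])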
def has_symbolic_tensors(ls):
if context.executing_eagerly():
return False
return has_tensors(ls)
def has_tensors(ls):
if isinstance(ls, (list, tuple)):
return any(tensor_util.is_tensor(v) for v in ls)
if isinstance(ls, dict):
return any(tensor_util.is_tensor(v) for _, v in six.iteritems(ls))
return tensor_util.is_tensor(ls)
def get_metric_name(metric, weighted=False):
"""Returns the name corresponding to the given metric input.
Arguments:
metric: Metric function name or reference.
weighted: Boolean indicating if the given metric is weighted.
Returns:
The metric name.
"""
if tf2.enabled():
# We keep the string that the user has set in compile as the metric name.
if isinstance(metric, six.string_types):
return metric
metric = metrics_module.get(metric)
return metric.name if hasattr(metric, 'name') else metric.__name__
else:
metric_name_prefix = 'weighted_' if weighted else ''
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
else:
metric_fn = metrics_module.get(metric)
# Get metric name as string
if hasattr(metric_fn, 'name'):
suffix = metric_fn.name
else:
suffix = metric_fn.__name__
metric_name = metric_name_prefix + suffix
return metric_name
def get_metric_function(metric, output_shape=None, loss_fn=None):
"""Returns the metric function corresponding to the given metric input.
Arguments:
metric: Metric function name or reference.
output_shape: The shape of the output that this metric will be calculated
for.
loss_fn: The loss function used.
Returns:
The metric function.
"""
if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
return metrics_module.get(metric)
is_sparse_categorical_crossentropy = (
isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or
(isinstance(loss_fn, losses.LossFunctionWrapper) and
loss_fn.fn == losses.sparse_categorical_crossentropy))
is_binary_crossentropy = (
isinstance(loss_fn, losses.BinaryCrossentropy) or
(isinstance(loss_fn, losses.LossFunctionWrapper) and
loss_fn.fn == losses.binary_crossentropy))
if metric in ['accuracy', 'acc']:
if output_shape[-1] == 1 or is_binary_crossentropy:
return metrics_module.binary_accuracy
elif is_sparse_categorical_crossentropy:
return metrics_module.sparse_categorical_accuracy
# If the output_shape[-1] is not 1, then we know output is `categorical`.
# We assume it is sparse categorical only if loss is explicitly given
# as sparse categorical crossentropy loss.
return metrics_module.categorical_accuracy
else:
if output_shape[-1] == 1 or is_binary_crossentropy:
return metrics_module.binary_crossentropy
elif is_sparse_categorical_crossentropy:
return metrics_module.sparse_categorical_crossentropy
return metrics_module.categorical_crossentropy
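# Usage sketch (hypothetical shapes): the generic 'accuracy'/'crossentropy' names
# are resolved against the output shape and the configured loss.
#
#   get_metric_function('accuracy', output_shape=(None, 1))
#   # -> metrics_module.binary_accuracy
#   get_metric_function('accuracy', output_shape=(None, 10),
#                       loss_fn=losses.SparseCategoricalCrossentropy())
#   # -> metrics_module.sparse_categorical_accuracy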
def call_metric_function(metric_fn,
y_true,
y_pred=None,
weights=None,
mask=None):
"""Invokes metric function and returns the metric result tensor."""
if mask is not None:
mask = math_ops.cast(mask, y_pred.dtype)
if weights is None:
# Use mask as sample weight.
weights = mask
else:
# Update dimensions of weights to match with mask.
weights = math_ops.cast(weights, dtype=y_pred.dtype)
mask, _, weights = tf_losses_utils.squeeze_or_expand_dimensions(
mask, sample_weight=weights)
weights *= mask
if y_pred is not None:
return metric_fn(y_true, y_pred, sample_weight=weights)
# `Mean` metric only takes a single value.
return metric_fn(y_true, sample_weight=weights)
def get_loss_function(loss):
"""Returns the loss corresponding to the loss input in `compile` API."""
if loss is None or isinstance(loss, losses.Loss):
return loss
if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):
# It is not safe to assume that the loss takes no constructor arguments.
raise ValueError(
        'Received uninstantiated Loss class: {}\nPlease call loss classes '
'before passing them to Model.compile.'.format(loss))
# Deserialize loss configuration, if needed.
if isinstance(loss, collections_abc.Mapping):
loss = losses.get(loss)
# Custom callable class.
if callable(loss) and not hasattr(loss, '__name__'):
return loss
# Wrap loss function with signature `(y_true, y_pred, **kwargs)`
# in `LossFunctionWrapper` class.
loss_fn = losses.get(loss)
# For losses which are given as strings/functions in the compile API,
# we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`
# (both in distribution strategy context and otherwise).
return losses.LossFunctionWrapper(
loss_fn,
name=loss_fn.__name__,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)
class RespectCompiledTrainableState(object):
"""Set and restore trainable state if it has changed since compile.
The keras API guarantees that the value of each Layer's `trainable` property
at `Model.compile` time will be used when training that model. In order to
respect this requirement, it may be necessary to set the trainable value of
layers to their compile time values before beginning a training endpoint and
  restore the values before returning from said endpoint. This scope checks if
any layer's trainable state has changed since Model compile, and performs this
set and un-set bookkeeping.
However, the trainable state of a layer changes quite infrequently, if ever,
for many kinds of workflows. Moreover, updating every layer in a model is an
expensive operation. As a result, we will only explicitly set and unset the
trainable state of a model if a trainable value has changed since compile.
"""
def __init__(self, model):
self._model = model
self._current_trainable_state = None
self._compiled_trainable_state = None
self._should_set_trainable = False
def __enter__(self):
self._current_trainable_state = self._model._get_trainable_state() # pylint: disable=protected-access
self._compiled_trainable_state = self._model._compiled_trainable_state # pylint: disable=protected-access
# Check to see if any layer's trainable state has changed since `compile`.
for layer, trainable in self._compiled_trainable_state.items():
if (layer in self._current_trainable_state and
trainable != self._current_trainable_state[layer]):
self._should_set_trainable = True
break
# If so, restore the model to its compiled state.
if self._should_set_trainable:
self._model._set_trainable_state(self._compiled_trainable_state) # pylint: disable=protected-access
def __exit__(self, type_arg, value_arg, traceback_arg):
# If we set the values to their compiled state in __enter__, we need to
# restore the original values before leaving the scope.
if self._should_set_trainable:
self._model._set_trainable_state(self._current_trainable_state) # pylint: disable=protected-access
return False # False values do not suppress exceptions
def validate_dataset_input(x, y, sample_weight, validation_split=None):
"""Validates user input arguments when a dataset iterator is passed.
Arguments:
x: Input data. A `tf.data` dataset or iterator.
y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
Expected to be `None` when `x` is a dataset iterator.
sample_weight: An optional sample-weight array passed by the user to weight
the importance of each sample in `x`. Expected to be `None` when `x` is a
dataset iterator
validation_split: Float between 0 and 1. Fraction of the training data to be
used as validation data. Expected to be `None` when `x` is a dataset
iterator.
Raises:
ValueError: if argument `y` or `sample_weight` or `validation_split` are
provided by user.
"""
if y is not None:
raise ValueError('You passed a dataset or dataset iterator (%s) as '
'input `x` to your model. In that case, you should '
'not specify a target (`y`) argument, since the dataset '
'or dataset iterator generates both input data and '
'target data. '
'Received: %s' % (x, y))
if sample_weight is not None:
raise ValueError('`sample_weight` argument is not supported when input '
                     '`x` is a dataset or a dataset iterator. Instead, you '
                     'can provide sample_weight as the third element of your '
'dataset, i.e. (inputs, targets, sample_weight). '
'Received: x=%s, sample_weight=%s' % (x, sample_weight))
if validation_split is not None and validation_split != 0.0:
raise ValueError(
'`validation_split` argument is not supported when '
'input `x` is a dataset or a dataset iterator. '
'Received: x=%s, validation_split=%f' % (x, validation_split))
def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):
"""Helper function to validate either inputs or targets."""
if isinstance(inp, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in inp):
raise ValueError(
'Please provide as model inputs either a single array or a list of '
'arrays. You passed: {}={}'.format(field_name, str(orig_inp)))
elif isinstance(inp, dict):
if not allow_dict:
raise ValueError(
'You cannot pass a dictionary as model {}.'.format(field_name))
elif not isinstance(inp, np.ndarray) and not tensor_util.is_tensor(inp):
raise ValueError(
'Please provide as model inputs either a single array or a list of '
'arrays. You passed: {}={}'.format(field_name, orig_inp))
def check_generator_arguments(y=None, sample_weight=None,
validation_split=None):
"""Validates arguments passed when using a generator."""
if y is not None:
    raise ValueError('`y` argument is not supported when data is '
'a generator or Sequence instance. Instead pass targets'
' as the second element of the generator.')
if sample_weight is not None:
    raise ValueError('`sample_weight` argument is not supported when data is '
'a generator or Sequence instance. Instead pass sample'
' weights as the third element of the generator.')
if validation_split:
raise ValueError('If your data is in the form of a Python generator, '
'you cannot use `validation_split`.')
def check_steps_argument(input_data, steps, steps_name):
"""Validates `steps` argument based on input data's type.
The cases when `steps` value must be provided are when
1. input data passed is an iterator.
2. model was built on top of symbolic tensors, input data is not
required and is `None`.
3. input data passed is a symbolic tensor.
Arguments:
input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
tf.data.Dataset iterator or `None`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
steps_name: The public API's parameter name for `steps`.
Returns:
boolean, True if `steps` argument is required, else False.
Raises:
ValueError: if `steps` argument is required for given input data type
but not provided.
"""
is_x_iterator = isinstance(
input_data, (iterator_ops.Iterator, iterator_ops.OwnedIterator))
if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or
(isinstance(input_data, list) and not input_data)):
if steps is None:
input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'
raise ValueError('When using {input_type} as input to a model, you should'
' specify the `{steps_name}` argument.'.format(
input_type=input_type_str, steps_name=steps_name))
return True
if isinstance(input_data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
return True
if steps is not None:
list_types = (np.ndarray, list, tuple)
if (isinstance(input_data, list_types) or
(isinstance(input_data, dict) and
any(isinstance(v, list_types) for v in input_data.values()))):
logging.warning('When passing input data as arrays, do not specify '
'`steps_per_epoch`/`steps` argument. '
'Please use `batch_size` instead.')
return False
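# Usage sketch (hypothetical data): `steps` is required for iterators, symbolic
# tensors and empty input, implied for Dataset objects, and unused for arrays.
#
#   check_steps_argument(np.zeros((10, 2)), steps=None, steps_name='steps')  # -> False
#   check_steps_argument(dataset_ops.DatasetV2.range(10), 5, 'steps')        # -> True
#   check_steps_argument(None, None, 'steps')                                # raises ValueError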
def cast_single_tensor(x, dtype=None):
if isinstance(x, np.ndarray):
x = ops.convert_to_tensor_v2(x)
dtype = dtype or K.floatx()
if x.dtype.is_floating:
return math_ops.cast(x, dtype=dtype)
return x
def cast_if_floating_dtype_and_mismatch(targets, outputs):
"""Returns target data tensors using correct datatype.
Checks that each target and output pair are the same datatype. If not, casts
the target to the output's datatype.
Args:
targets: tensor or list of targets.
outputs: tensor or list of outputs.
Returns:
Targets in appropriate datatype.
"""
if tensor_util.is_tensor(targets):
# There is one target, so output[0] should be the only output.
return cast_single_tensor(targets, dtype=outputs[0].dtype)
new_targets = []
for target, out in zip(targets, outputs):
if isinstance(target, np.ndarray):
target = ops.convert_to_tensor_v2(target)
if target.dtype != out.dtype:
new_targets.append(cast_single_tensor(target, dtype=out.dtype))
else:
new_targets.append(target)
return new_targets
def cast_if_floating_dtype(x, dtype=None):
"""Casts the given data tensors to the default floating point type.
Casts only if the input is already a floating point type.
Args:
x: tensor or list/tuple of tensors.
dtype: The dtype to which Tensors should be cast.
Returns:
Converted input.
"""
return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype),
x)
def cast_to_model_input_dtypes(x, model):
"""Casts the given data tensors to the dtypes of the model inputs.
Args:
x: tensor or list/tuple of tensors.
model: The model.
Returns:
Converted input. Each tensor is casted to the corresponding input in
`model.inputs`.
"""
input_dtypes = nest.map_structure(lambda t: t.dtype, model.inputs)
return nest.map_structure(math_ops.cast, x, input_dtypes)
def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):
"""Prepares sample weight modes for the model.
Args:
training_endpoints: List of model _TrainingEndpoints.
sample_weight_mode: sample weight mode user input passed from compile API.
Raises:
ValueError: In case of invalid `sample_weight_mode` input.
"""
if isinstance(sample_weight_mode, collections.Mapping):
generic_utils.check_for_unexpected_keys(
'sample_weight_mode', sample_weight_mode,
[e.output_name for e in training_endpoints])
for end_point in training_endpoints:
if not end_point.should_skip_target_weights():
if end_point.output_name not in sample_weight_mode:
raise ValueError('Output ' + end_point.output_name +
                           ' missing from `_sample_weight_modes` dictionary')
else:
end_point.sample_weight_mode = sample_weight_mode.get(
end_point.output_name)
elif isinstance(sample_weight_mode, (list, tuple)):
if len(sample_weight_mode) != len(training_endpoints):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(training_endpoints)) +
' outputs, but you passed ' +
                       str(len(sample_weight_mode)) + ' sample_weight_modes.')
for mode, endpoint in zip(sample_weight_mode, training_endpoints):
if not endpoint.should_skip_target_weights():
endpoint.sample_weight_mode = mode
else:
for endpoint in training_endpoints:
if not endpoint.should_skip_target_weights():
endpoint.sample_weight_mode = sample_weight_mode
def prepare_loss_functions(loss, output_names):
"""Converts loss to a list of loss functions.
Arguments:
loss: String (name of objective function), objective function or
`tf.losses.Loss` instance. See `tf.losses`. If the model has multiple
outputs, you can use a different loss on each output by passing a
dictionary or a list of losses. The loss value that will be minimized by
the model will then be the sum of all individual losses.
output_names: List of model output names.
Returns:
A list of loss objective functions.
Raises:
ValueError: If loss is a dict with keys not in model output names,
or if loss is a list with len not equal to model outputs.
"""
if isinstance(loss, collections_abc.Mapping):
generic_utils.check_for_unexpected_keys('loss', loss, output_names)
loss_functions = []
for name in output_names:
if name not in loss:
logging.warning(
'Output {0} missing from loss dictionary. We assume '
'this was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to {0}.'.format(name))
loss_functions.append(get_loss_function(loss.get(name, None)))
elif isinstance(loss, six.string_types):
loss_functions = [get_loss_function(loss) for _ in output_names]
elif isinstance(loss, collections_abc.Sequence):
if len(loss) != len(output_names):
raise ValueError('When passing a list as loss, it should have one entry '
                       'per model output. The model has {} outputs, but you '
'passed loss={}'.format(len(output_names), loss))
loss_functions = nest.map_structure(get_loss_function, loss)
else:
loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]
return loss_functions
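# Usage sketch (hypothetical names): a single loss is broadcast to every output;
# a dict is matched by output name and missing names resolve to None with a warning.
#
#   prepare_loss_functions('mse', ['out_a', 'out_b'])
#   # -> [<LossFunctionWrapper mean_squared_error>, <LossFunctionWrapper mean_squared_error>]
#   prepare_loss_functions({'out_a': 'mae'}, ['out_a', 'out_b'])
#   # -> [<LossFunctionWrapper mean_absolute_error>, None]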
def prepare_loss_weights(training_endpoints, loss_weights=None):
"""Converts loss weights to a list of loss weights.
The result loss weights will be populated on the training endpoint.
Arguments:
training_endpoints: List of model training endpoints.
loss_weights: Optional list or dictionary specifying scalar coefficients
(Python floats) to weight the loss contributions of different model
outputs. The loss value that will be minimized by the model will then be
the *weighted sum* of all individual losses, weighted by the
`loss_weights` coefficients. If a list, it is expected to have a 1:1
mapping to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
Raises:
ValueError: If loss weight is a dict with key not in model output names,
or if loss is a list with len not equal to model outputs.
"""
if loss_weights is None:
for e in training_endpoints:
e.loss_weight = 1.
  elif isinstance(loss_weights, collections_abc.Mapping):
generic_utils.check_for_unexpected_keys(
'loss_weights', loss_weights,
[e.output_name for e in training_endpoints])
for e in training_endpoints:
e.loss_weight = loss_weights.get(e.output_name, 1.)
elif isinstance(loss_weights, list):
if len(loss_weights) != len(training_endpoints):
raise ValueError('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(training_endpoints)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
for w, e in zip(loss_weights, training_endpoints):
e.loss_weight = w
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                    str(loss_weights) + ' - expected a list or a dict.')
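# Illustrative usage sketch (added for clarity, not part of the original
# module), assuming `endpoints` holds two endpoints named 'a' and 'b':
#
#   prepare_loss_weights(endpoints)              # every endpoint gets loss_weight 1.
#   prepare_loss_weights(endpoints, [0.5, 2.0])  # one weight per output, by position
#   prepare_loss_weights(endpoints, {'b': 2.0})  # 'a' falls back to the default weight 1.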
# TODO(rohanj): This is a hack to get around not depending on feature_column and
# create a cyclical dependency. Figure out a cleaner solution
def is_feature_layer(layer):
"""Returns whether `layer` is a FeatureLayer or not."""
return getattr(layer, '_is_feature_layer', False)
def is_eager_dataset_or_iterator(data):
return context.executing_eagerly() and isinstance(
data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
iterator_ops.OwnedIterator))
# pylint: disable=protected-access
def assert_not_batched(dataset):
"""Asserts that `dataset` is not batched.
The algorithm used by this method is sound but not complete. In other words,
if the method fails to establish the assertion, it does not mean the dataset
is batched.
Example usage:
```python
try:
assert_not_batched(dataset)
    # safe to assume `dataset` is not batched here
  except ValueError:
# make no assumptions about `dataset`
```
Args:
dataset: The dataset to analyze.
Raises:
ValueError: If the method cannot establish the assertion.
"""
if isinstance(dataset, dataset_ops.DatasetV1Adapter):
return assert_not_batched(dataset._dataset)
else:
whitelisted_types = [
dataset_ops._OptionsDataset,
dataset_ops.ConcatenateDataset,
dataset_ops.CacheDataset,
dataset_ops.FilterDataset,
dataset_ops.MapDataset,
dataset_ops.ParallelMapDataset,
dataset_ops.PrefetchDataset,
dataset_ops.RangeDataset,
dataset_ops.RepeatDataset,
dataset_ops.ShuffleDataset,
dataset_ops.SkipDataset,
dataset_ops.SparseTensorSliceDataset,
dataset_ops.TakeDataset,
dataset_ops.TensorDataset,
dataset_ops.TensorSliceDataset,
dataset_ops.ZipDataset,
readers.FixedLengthRecordDatasetV2,
readers.TextLineDatasetV2,
readers.TFRecordDatasetV2,
]
for ty in whitelisted_types:
if isinstance(dataset, ty):
for input_dataset in dataset._inputs():
assert_not_batched(input_dataset)
return
raise ValueError('Could not assert that dataset is not batched.')
# pylint: disable=protected-access
def assert_not_shuffled(dataset):
"""Asserts that `dataset` is not shuffled.
The algorithm used by this method is sound but not complete. In other words,
if the method fails to establish the assertion, it does not mean the dataset
is shuffled.
Example usage:
```python
try:
assert_not_shuffled(dataset)
    # safe to assume `dataset` is not shuffled here
  except ValueError:
# make no assumptions about `dataset`
```
Args:
dataset: The dataset to analyze.
Raises:
ValueError: If the method cannot establish the assertion.
"""
if isinstance(dataset, dataset_ops.DatasetV1Adapter):
return assert_not_shuffled(dataset._dataset)
else:
whitelisted_types = [
dataset_ops._OptionsDataset,
dataset_ops.BatchDataset,
dataset_ops.ConcatenateDataset,
dataset_ops.CacheDataset,
dataset_ops.FilterDataset,
dataset_ops.MapDataset,
dataset_ops.PaddedBatchDataset,
dataset_ops.ParallelMapDataset,
dataset_ops.PrefetchDataset,
dataset_ops.RangeDataset,
dataset_ops.RepeatDataset,
dataset_ops.SkipDataset,
dataset_ops.SparseTensorSliceDataset,
dataset_ops.TakeDataset,
dataset_ops.TensorDataset,
dataset_ops.TensorSliceDataset,
dataset_ops.WindowDataset,
dataset_ops.ZipDataset,
readers.FixedLengthRecordDatasetV2,
readers.TextLineDatasetV2,
readers.TFRecordDatasetV2,
]
for ty in whitelisted_types:
if isinstance(dataset, ty):
for input_dataset in dataset._inputs():
assert_not_shuffled(input_dataset)
return
raise ValueError('Could not assert that dataset is not shuffled.')
def verify_dataset_shuffled(x):
"""Verifies that the dataset is shuffled.
Args:
x: Dataset passed as an input to the model.
Raises:
ValueError: if the dataset is not already shuffled.
"""
assert isinstance(x, dataset_ops.DatasetV2)
try:
assert_not_shuffled(x)
except ValueError:
# Dataset may or may not be shuffled.
return
else:
logging.warning('Expected a shuffled dataset but input dataset `x` is '
'not shuffled. Please invoke `shuffle()` on input dataset.')
def is_dataset_or_iterator(data):
return isinstance(data, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
iterator_ops.Iterator, iterator_ops.OwnedIterator))
def get_iterator(dataset):
"""Create and initialize an iterator from a dataset."""
if context.executing_eagerly():
iterator = dataset_ops.make_one_shot_iterator(dataset)
else:
iterator = dataset_ops.make_initializable_iterator(dataset)
initialize_iterator(iterator)
return iterator
def initialize_iterator(iterator):
  """Runs the iterator initializer when not executing eagerly (graph mode)."""
if not context.executing_eagerly():
init_op = iterator.initializer
K.get_session((init_op,)).run(init_op)
def extract_tensors_from_dataset(dataset):
"""Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.
Arguments:
dataset: Dataset instance.
Returns:
Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
"""
iterator = get_iterator(dataset)
inputs, targets, sample_weight = unpack_iterator_input(iterator)
return inputs, targets, sample_weight
def unpack_iterator_input(iterator):
"""Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.
Arguments:
iterator: Instance of a dataset iterator.
Returns:
Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
"""
try:
next_element = iterator.get_next()
except errors.OutOfRangeError:
raise RuntimeError('Your dataset iterator ran out of data; '
                       'Make sure that your dataset can generate the '
                       'required number of samples.')
if isinstance(next_element, (list, tuple)):
if len(next_element) not in [2, 3]:
raise ValueError(
'Please provide model inputs as a list or tuple of 2 or 3 '
'elements: (input, target) or (input, target, sample_weights) '
'Received %s' % next_element)
if len(next_element) == 2:
x, y = next_element
weights = None
else:
x, y, weights = next_element
else:
x = next_element
y = None
weights = None
return x, y, weights
def infer_steps_for_dataset(model,
dataset,
steps,
epochs=1,
steps_name='steps'):
"""Infers steps_per_epoch needed to loop through a dataset.
Arguments:
model: Keras model instance.
dataset: Input data of type tf.data.Dataset.
steps: Number of steps to draw from the dataset (may be None if unknown).
epochs: Number of times to iterate over the dataset.
steps_name: The string name of the steps argument, either `steps`,
`validation_steps`, or `steps_per_epoch`. Only used for error message
formatting.
Returns:
Integer or `None`. Inferred number of steps to loop through the dataset.
`None` is returned if 1) the size of the dataset is unknown and `steps` was
not specified, or 2) this is multi-worker training and auto sharding is
enabled.
Raises:
ValueError: In case of invalid argument values.
"""
assert isinstance(dataset, dataset_ops.DatasetV2)
if (model._in_multi_worker_mode() and
(dataset.options().experimental_distribute.auto_shard_policy !=
AutoShardPolicy.OFF)):
# If the dataset would be auto-sharded, we should not infer a local
    # steps_per_epoch due to the possible imbalanced sharding between workers.
return None
size = K.get_value(cardinality.cardinality(dataset))
if size == cardinality.INFINITE and steps is None:
raise ValueError('When passing an infinitely repeating dataset, you '
'must specify the `%s` argument.' % (steps_name,))
if size >= 0:
if steps is not None and steps * epochs > size:
if epochs > 1:
raise ValueError('The dataset you passed contains %s batches, but you '
'passed `epochs=%s` and `%s=%s`, which is a total of '
'%s steps. We cannot draw that many steps from this '
'dataset. We suggest to set `%s=%s`.' %
(size, epochs, steps_name, steps, steps * epochs,
steps_name, size // epochs))
else:
raise ValueError('The dataset you passed contains %s batches, but you '
'passed `%s=%s`. We cannot draw that many steps from '
'this dataset. We suggest to set `%s=%s`.' %
(size, steps_name, steps, steps_name, size))
if steps is None:
if size >= 0:
return size
return None
return steps
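# Illustrative usage sketch (added for clarity, not part of the original
# module), assuming `model` is a single-worker Keras model and `ds` is a
# tf.data.Dataset that yields exactly 10 batches:
#
#   infer_steps_for_dataset(model, ds, steps=None)           # -> 10 (taken from the dataset cardinality)
#   infer_steps_for_dataset(model, ds, steps=4)              # -> 4
#   infer_steps_for_dataset(model, ds.repeat(), steps=None)  # -> ValueError (infinite dataset, no steps)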
class ModelInputs(object):
"""Encapsulates model inputs.
Allows for transforming model inputs while keeping the same structure.
"""
def __init__(self, inputs):
self._inputs = inputs
self._is_dict = isinstance(self._inputs, dict)
self._is_single_input = not isinstance(self._inputs, (list, tuple, dict))
self._flattened_inputs = []
self._input_names = []
if self._is_dict:
for k in sorted(self._inputs.keys()):
self._flattened_inputs.append(self._inputs[k])
self._input_names.append(k)
else:
self._flattened_inputs = nest.flatten(self._inputs)
self._input_names = [
'input_%d' % (i + 1) for i in range(len(self._flattened_inputs))
]
def get_input_names(self):
"""Returns keys to name inputs by.
In case inputs provided were a list, tuple or single entry, we make up a
key 'input_%d'. For dictionary case, we return a sorted list of keys.
"""
return self._input_names
def get_symbolic_inputs(self, return_single_as_list=False):
"""Returns inputs to be set as self.inputs for a model."""
# TODO(karmel): There is a side-effect here where what you get
# with as_list and as_dict depends on whether you have called this
# method first, since it modifies in place.
for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):
if isinstance(v, (list, float, int)):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, (np.ndarray, ops.EagerTensor)):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
# to specify custom placeholders if the need arises.
shape = (None,) + tuple(v.shape[1:])
if shape == (None,):
shape = (None, 1)
dtype = dtypes.as_dtype(v.dtype)
if dtype.is_floating:
dtype = K.floatx()
v = K.placeholder(shape=shape, name=k, dtype=dtype)
elif isinstance(v, tensor_spec.TensorSpec):
shape = (None,) + tuple(v.shape.as_list()[1:])
if shape == (None,):
shape = (None, 1)
v = K.placeholder(shape=shape, name=k, dtype=v.dtype)
self._flattened_inputs[i] = v
if self._is_dict:
return dict(zip(self._input_names, self._flattened_inputs))
if self._is_single_input and not return_single_as_list:
return self._flattened_inputs[0]
return self._flattened_inputs
def as_dict(self):
"""An iterable over a dictionary version of inputs."""
for k, v in zip(self._input_names, self._flattened_inputs):
yield k, v
def as_list(self):
"""Returning the inputs as a list."""
return self._flattened_inputs
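# Illustrative usage sketch (added for clarity, not part of the original
# module): wrapping a single NumPy array of shape (32, 10) in graph mode.
#
#   mi = ModelInputs(np.ones((32, 10)))
#   mi.get_input_names()       # -> ['input_1']
#   mi.get_symbolic_inputs()   # -> a placeholder with shape (None, 10)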
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
"""Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
"""
def _is_graph_model(layer):
return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or
layer.__class__.__name__ == 'Sequential')
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
# Subclassed Models may not have been built so can't be checked.
while _is_graph_model(layer):
if not layer.layers:
raise ValueError('An empty Model cannot be used as a Layer.')
layer = layer.layers[0]
if hasattr(layer, '_batch_input_shape'):
return layer._batch_input_shape, layer.dtype
return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
"""Gets the static batch size of a Layer.
Arguments:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
"""
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tensor_shape.as_dimension(batch_input_shape[0]).value
return None
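# Illustrative usage sketch (added for clarity, not part of the original
# module); `tf.keras.layers.Dense` is used here purely as an example layer:
#
#   get_static_batch_size(tf.keras.layers.Dense(4, batch_input_shape=(8, 16)))  # -> 8
#   get_static_batch_size(tf.keras.layers.Dense(4))                             # -> None (no defined input shape)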
def generic_output_names(outputs_list):
return ['output_%d' % (i + 1) for i in range(len(outputs_list))]
def convert_eager_tensors_to_numpy(structure):
"""Convert every EagerTensor in `structure` to NumPy.
Arguments:
structure: An arbitrary structure of elements to be converted to NumPy
arrays.
Returns:
An identical structure with EagerTensors converted to NumPy arrays.
"""
def _convert(element):
if isinstance(element, ops.EagerTensor):
return element.numpy()
return element
return nest.map_structure(_convert, structure)
def list_to_tuple(maybe_list):
"""Datasets will stack the list of tensor, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
def should_run_validation(validation_freq, epoch):
"""Checks if validation should be run this epoch.
Arguments:
validation_freq: Integer or list. If an integer, specifies how many training
epochs to run before a new validation run is performed. If a list,
specifies the epochs on which to run validation.
epoch: Integer, the number of the training epoch just completed.
Returns:
Bool, True if validation should be run.
Raises:
ValueError: if `validation_freq` is an Integer and less than 1, or if
it is neither an Integer nor a Sequence.
"""
# `epoch` is 0-indexed internally but 1-indexed in the public API.
one_indexed_epoch = epoch + 1
if isinstance(validation_freq, int):
if validation_freq < 1:
raise ValueError('`validation_freq` can not be less than 1.')
return one_indexed_epoch % validation_freq == 0
if not isinstance(validation_freq, collections_abc.Container):
raise ValueError('`validation_freq` must be an Integer or '
'`collections_abc.Container` (e.g. list, tuple, etc.)')
return one_indexed_epoch in validation_freq
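# Illustrative usage sketch (added for clarity, not part of the original
# module); note that `epoch` is the 0-indexed epoch that just completed:
#
#   should_run_validation(2, epoch=0)       # -> False (1st epoch)
#   should_run_validation(2, epoch=1)       # -> True  (2nd epoch)
#   should_run_validation([1, 4], epoch=3)  # -> True  (4th epoch is listed)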
def split_training_and_validation_data(x, y, sample_weights, validation_split):
"""Split input data into train/eval section based on validation_split."""
if has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (generic_utils.slice_arrays(x, 0, split_at),
generic_utils.slice_arrays(x, split_at))
y, val_y = (generic_utils.slice_arrays(y, 0, split_at),
generic_utils.slice_arrays(y, split_at))
if sample_weights:
sample_weights, val_sample_weights = (
generic_utils.slice_arrays(sample_weights, 0, split_at),
generic_utils.slice_arrays(sample_weights, split_at),
)
else:
val_sample_weights = None
return x, y, sample_weights, val_x, val_y, val_sample_weights
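# Illustrative usage sketch (added for clarity, not part of the original
# module): with 100 samples and validation_split=0.2 the first 80 samples stay
# for training and the last 20 become the validation split.
#
#   x, y = [np.zeros((100, 3))], [np.zeros((100, 1))]
#   x, y, w, val_x, val_y, val_w = split_training_and_validation_data(x, y, [], 0.2)
#   # x[0].shape -> (80, 3); val_x[0].shape -> (20, 3); val_w is None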
def unpack_validation_data(validation_data, raise_if_ambiguous=True):
"""Unpack validation data based input type.
The validation data is not touched if its dataset or dataset iterator.
For other type of input (Numpy or tensor), it will be unpacked into tuple of
3 which is x, y and sample weights.
Args:
validation_data: dataset, dataset iterator, or numpy, tensor tuple.
raise_if_ambiguous: boolean on whether to fail if validation_data cannot be
parsed. Otherwise simply return validation_data, None, None and defer the
decision to the caller.
Returns:
tuple of 3, (x, y, sample_weights) for numpy and tensor input.
"""
if (isinstance(validation_data, (iterator_ops.Iterator,
iterator_ops.OwnedIterator,
dataset_ops.DatasetV2,
data_utils.Sequence))
or not hasattr(validation_data, '__len__')):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
try:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
except ValueError:
val_x, val_y, val_sample_weight = validation_data, None, None
elif len(validation_data) == 3:
try:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
except ValueError:
val_x, val_y, val_sample_weight = validation_data, None, None
else:
if raise_if_ambiguous:
raise ValueError(
'When passing a `validation_data` argument, '
'it must contain either 2 items (x_val, y_val), '
'or 3 items (x_val, y_val, val_sample_weights), '
          'or alternatively it could be a dataset or a '
          'dataset iterator. '
'However we received `validation_data=%s`' % validation_data)
val_x, val_y, val_sample_weight = validation_data, None, None
return val_x, val_y, val_sample_weight
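# Illustrative usage sketch (added for clarity, not part of the original
# module):
#
#   unpack_validation_data((x_val, y_val))         # -> (x_val, y_val, None)
#   unpack_validation_data((x_val, y_val, w_val))  # -> (x_val, y_val, w_val)
#   unpack_validation_data(val_dataset)            # -> (val_dataset, None, None) for datasets/iterators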
class TrainingLoop(object):
"""TrainingLoop is a wrapper class around the training logic.
  This class encapsulates the different fit/eval/predict logic required for
  different kinds of data inputs and model conditions.
  Note that TrainingLoop is stateless, which means it doesn't contain any
  internal fields and can be reused with different models and inputs.
"""
def fit(self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs):
"""Train the model with the inputs and targets."""
raise NotImplementedError()
def evaluate(self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs):
"""Returns the loss value & metrics values for the model in test mode."""
raise NotImplementedError()
def predict(self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs):
    raise NotImplementedError()
<|file_name|>generalizedaffineimage1.cc<|end_file_name|>
/* Test Octagonal_Shape::generalized_affine_image().
Copyright (C) 2001-2009 Roberto Bagnara <[email protected]>
This file is part of the Parma Polyhedra Library (PPL).
The PPL is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
The PPL is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307, USA.
For the most up-to-date information see the Parma Polyhedra Library
site: http://www.cs.unipr.it/ppl/ . */
#include "ppl_test.hh"
namespace {
bool
test01() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2);
oct.add_constraint(A >= 0);
oct.add_constraint(A <= 4);
oct.add_constraint(B <= 5);
oct.add_constraint(A <= B);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(2);
known_result.add_constraint(A >= 0);
known_result.add_constraint(A <= 4);
known_result.add_constraint(B - A >= 2);
oct.generalized_affine_image(B, GREATER_OR_EQUAL, A+2);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(B, "
"GREATER_OR_EQUAL, A+2) ***");
return ok;
}
bool
test02() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2);
oct.add_constraint(B >= 0);
oct.add_constraint(A - B >= 0);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(oct);
oct.generalized_affine_image(A, EQUAL, A + 2);
known_result.affine_image(A, A + 2);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image"
"(A, EQUAL, A + 2) ***");
return ok;
}
bool
test03() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2, EMPTY);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(2, EMPTY);
oct.generalized_affine_image(A, LESS_OR_EQUAL, B + 1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image"
"(A, LESS_OR_EQUAL, B + 1) ***");
return ok;
}
bool
test04() {
Variable x(0);
Variable y(1);
Variable z(2);
TOctagonal_Shape oct(3);
oct.add_constraint(x >= 2);
oct.add_constraint(x - y <= 3);
oct.add_constraint(y <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(x >= 1);
known_result.add_constraint(y <= 2);
known_result.add_constraint(- y <= 1);
oct.generalized_affine_image(x, GREATER_OR_EQUAL, 2*x - 2, 2);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(x,"
"GREATER_OR_EQUAL, 2*x - 2, 2) ***");
return ok;
}
bool
test05() {
Variable x(0);
Variable y(1);
TOctagonal_Shape oct(3);
oct.add_constraint(x >= 2);
oct.add_constraint(x - y <= 3);
oct.add_constraint(y <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(x >= 2);
known_result.add_constraint(x <= 5);
known_result.add_constraint(x - y <= 1);
oct.generalized_affine_image(y, GREATER_OR_EQUAL, 2*x - 2, 2);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(y, "
"GREATER_OR_EQUAL, 2*x - 2, 2) ***");
return ok;
}
bool
test06() {
Variable x(0);
Variable y(1);
TOctagonal_Shape oct(2);
oct.add_constraint(x >= 4);
oct.add_constraint(x <= -6);
oct.add_constraint(y == 0);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(2 , EMPTY);
oct.generalized_affine_image(y, LESS_OR_EQUAL, Linear_Expression(2));
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(y, "
"LESS_OR_EQUAL, 2) ***");
return ok;
}
bool
test07() {
Variable x(0);
// Variable y(1);
TOctagonal_Shape oct(2, EMPTY);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(oct);
oct.generalized_affine_image(x, EQUAL, Linear_Expression(6));
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(x, EQUAL, 6) ***");
return ok;
}
bool
test08() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2);
oct.add_constraint(A >= 0);
oct.add_constraint(B >= 0);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(2);
known_result.add_constraint(B >= 0);
known_result.add_constraint(A <= 3);
oct.generalized_affine_image(A, LESS_OR_EQUAL, Linear_Expression(3));
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(A, "
"LESS_OR_EQUAL, 3) ***");
return ok;
}
bool
test09() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2);
oct.add_constraint(A == 0);
oct.add_constraint(B >= 1);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(oct);
known_result.affine_image(B, Linear_Expression(5));
oct.generalized_affine_image(B, EQUAL, Linear_Expression(5));
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(B, EQUAL, 5) ***");
return ok;
}
bool
test10() {
Variable A(0);
Variable B(1);
TOctagonal_Shape oct(2);
oct.add_constraint(A + B == 0);
oct.add_constraint(B <= 1);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(2);
known_result.add_constraint(A >= 2);
known_result.add_constraint(B <= 1);
oct.generalized_affine_image(A, GREATER_OR_EQUAL, Linear_Expression(2));
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(A, "
"GREATER_OR_EQUAL, 2) ***");
return ok;
}
bool
test11() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A - B == 0);
oct.add_constraint(B <= 1);
oct.add_constraint(C + A <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A - B == 0);
known_result.add_constraint(B <= 1);
known_result.add_constraint(C + A <= 3);
known_result.add_constraint(C + B <= 3);
oct.generalized_affine_image(C, LESS_OR_EQUAL, C + 1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(C, "
"LESS_OR_EQUAL, C + 1) ***");
return ok;
}
bool
test12() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A - B == 0);
oct.add_constraint(B <= 1);
oct.add_constraint(C + A <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(oct);
known_result.affine_image(C, C + 1);
oct.generalized_affine_image(C, EQUAL, C + 1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(C, "
"EQUAL, C+1) ***");
return ok;
}
bool
test13() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A - B == 0);
oct.add_constraint(B <= 1);
oct.add_constraint(C + A <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(B - A >= -2);
known_result.add_constraint(A <= 1);
known_result.add_constraint(C + A <= 2);
oct.generalized_affine_image(B, GREATER_OR_EQUAL, B - 2);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(B, "
"GREATER_OR_EQUAL, B - 2) ***");
return ok;
}
bool
test14() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A - B == 0);
oct.add_constraint(B <= 1);
oct.add_constraint(C + A <= 2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(B <= C + 3);
known_result.add_constraint(A <= 1);
known_result.add_constraint(C + A <= 2);
oct.generalized_affine_image(B, LESS_OR_EQUAL, C + 3);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct, "*** oct.generalized_affine_image(B, "
"LESS_OR_EQUAL, C + 3) ***");
return ok;
}
bool
test15() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A - B == 0);
oct.add_constraint(B <= 1);
oct.add_constraint(C + A <=2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(oct);
known_result.affine_image(B, C + 3);
oct.generalized_affine_image(B, EQUAL, C + 3);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct,
"*** oct.generalized_affine_image(B, EQUAL, C+3) ***");
return ok;
}
bool
test16() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A <= 21);
oct.add_constraint(B <= 1);
oct.add_constraint(C <= 2);
oct.add_constraint(A >= 2);
oct.add_constraint(B >= -1);
oct.add_constraint(C >= -2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A <= 21);
known_result.add_constraint(A >= 2);
known_result.add_constraint(C <= 2);
known_result.add_constraint(C >= -2);
known_result.add_constraint(B <= 4);
oct.generalized_affine_image(B, LESS_OR_EQUAL, B + 3);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct,
"*** oct.generalized_affine_image(B, "
"LESS_OR_EQUAL, B + 3) ***");
return ok;
}
bool
test17() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A <= 21);
oct.add_constraint(B <= 1);
oct.add_constraint(C <= 2);
oct.add_constraint(A >= 2);
oct.add_constraint(B >= -1);
oct.add_constraint(C >= -2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A <= 21);
known_result.add_constraint(A >= 2);
known_result.add_constraint(C <= 2);
known_result.add_constraint(C >= -2);
known_result.add_constraint(B + C <= -3);
oct.generalized_affine_image(B, LESS_OR_EQUAL, C + 3, -1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct,
"*** oct.generalized_affine_image(B, "
"LESS_OR_EQUAL, C + 3, -1) ***");
return ok;
}
bool
test18() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A <= 21);
oct.add_constraint(B <= 1);
oct.add_constraint(C <= 2);
oct.add_constraint(A >= 2);
oct.add_constraint(B >= -1);
oct.add_constraint(C >= -2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A <= 21);
known_result.add_constraint(A >= 2);
known_result.add_constraint(C <= 2);
known_result.add_constraint(C >= -2);
known_result.add_constraint(A + B <= -3);
oct.generalized_affine_image(B, LESS_OR_EQUAL, A + 3, -1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct,
"*** oct.generalized_affine_image(B, "
"LESS_OR_EQUAL, A + 3, -1) ***");
return ok;
}
bool
test19() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A <= 21);
oct.add_constraint(B <= 1);
oct.add_constraint(C <= 2);
oct.add_constraint(A >= 2);
oct.add_constraint(B >= -1);
oct.add_constraint(C >= -2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A <= 21);
known_result.add_constraint(A >= 2);
known_result.add_constraint(C <= 2);
known_result.add_constraint(C >= -2);
known_result.add_constraint(B - A >= 3);
oct.generalized_affine_image(B, GREATER_OR_EQUAL, A + 3);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
  print_constraints(oct,
                    "*** oct.generalized_affine_image(B, "
"GREATER_OR_EQUAL, A + 3) ***");
return ok;
}
bool
test20() {
Variable A(0);
Variable B(1);
Variable C(2);
TOctagonal_Shape oct(3);
oct.add_constraint(A <= 21);
oct.add_constraint(B <= 1);
oct.add_constraint(C <= 2);
oct.add_constraint(A >= 2);
oct.add_constraint(B >= -1);
oct.add_constraint(C >= -2);
print_constraints(oct, "*** oct ***");
Octagonal_Shape<mpq_class> known_result(3);
known_result.add_constraint(A <= 21);
known_result.add_constraint(A >= 2);
known_result.add_constraint(C <= 2);
known_result.add_constraint(C >= -2);
known_result.add_constraint(A + B >= -3);
oct.generalized_affine_image(B, GREATER_OR_EQUAL, A + 3, -1);
bool ok = (Octagonal_Shape<mpq_class>(oct) == known_result);
print_constraints(oct,
"*** oct.generalized_affine_image(B, "
"GREATER_OR_EQUAL, A + 3, -1) ***");
return ok;
}
} // namespace
BEGIN_MAIN
DO_TEST(test01);
DO_TEST(test02);
DO_TEST(test03);
DO_TEST(test04);
DO_TEST(test05);
DO_TEST(test06);
DO_TEST(test07);
DO_TEST(test08);
DO_TEST(test09);
DO_TEST(test10);
DO_TEST(test11);
DO_TEST(test12);
DO_TEST(test13);
DO_TEST(test14);
DO_TEST(test15);
DO_TEST(test16);
DO_TEST(test17);
DO_TEST(test18);
DO_TEST(test19);
DO_TEST(test20);
END_MAIN
<|file_name|>jquery.bxslider.min.js<|end_file_name|>
/**
* BxSlider v4.1.2 - Fully loaded, responsive content slider
* http://bxslider.com
*
* Copyright 2014, Steven Wanderski - http://stevenwanderski.com - http://bxcreative.com
* Written while drinking Belgian ales and listening to jazz
*
* Released under the MIT license - http://opensource.org/licenses/MIT
*/
!function(t){
var e={},s={
mode:"horizontal",
slideSelector:"",
infiniteLoop:!0,
hideControlOnEnd:!1,
speed:500,
easing:null,
slideMargin:0,
startSlide:0,
randomStart:!1,
captions:!1,
ticker:!1,
tickerHover:!1,
adaptiveHeight:!1,
adaptiveHeightSpeed:500,
video:!1,
useCSS:!0,
preloadImages:"visible",
responsive:!0,
slideZIndex:50,
touchEnabled:!0,
swipeThreshold:50,
oneToOneTouch:!0,
preventDefaultSwipeX:!0,
preventDefaultSwipeY:!1,
pager:!0,
pagerType:"full",
pagerShortSeparator:" / ",
pagerSelector:null,
buildPager:null,
pagerCustom:null,
controls:!0,
nextText:"Next",
prevText:"Prev",
nextSelector:null,
prevSelector:null,
autoControls:!1,
startText:"Start",
stopText:"Stop",
autoControlsCombine:!1,
autoControlsSelector:null,
auto:!0,
pause:4e3,
autoStart:!0,
autoDirection:"next",
autoHover:!1,
autoDelay:0,
minSlides:1,
maxSlides:1,
moveSlides:0,
slideWidth:0,
onSliderLoad:function(){},
onSlideBefore:function(){},
onSlideAfter:function(){},
onSlideNext:function(){},
onSlidePrev:function(){},
onSliderResize:function(){}
};
t.fn.bxSlider=function(n){
if(0==this.length)return this;
if(this.length>1)return this.each(function(){
t(this).bxSlider(n)
}),this;
var o={},r=this;
e.el=this;
var a=t(window).width(),l=t(window).height(),d=function(){
o.settings=t.extend({},s,n),o.settings.slideWidth=parseInt(o.settings.slideWidth),o.children=r.children(o.settings.slideSelector),o.children.length<o.settings.minSlides&&(o.settings.minSlides=o.children.length),o.children.length<o.settings.maxSlides&&(o.settings.maxSlides=o.children.length),o.settings.randomStart&&(o.settings.startSlide=Math.floor(Math.random()*o.children.length)),o.active={
index:o.settings.startSlide
},o.carousel=o.settings.minSlides>1||o.settings.maxSlides>1,o.carousel&&(o.settings.preloadImages="all"),o.minThreshold=o.settings.minSlides*o.settings.slideWidth+(o.settings.minSlides-1)*o.settings.slideMargin,o.maxThreshold=o.settings.maxSlides*o.settings.slideWidth+(o.settings.maxSlides-1)*o.settings.slideMargin,o.working=!1,o.controls={},o.interval=null,o.animProp="vertical"==o.settings.mode?"top":"left",o.usingCSS=o.settings.useCSS&&"fade"!=o.settings.mode&&function(){
var t=document.createElement("div"),e=["WebkitPerspective","MozPerspective","OPerspective","msPerspective"];
for(var i in e)if(void 0!==t.style[e[i]])return o.cssPrefix=e[i].replace("Perspective","").toLowerCase(),o.animProp="-"+o.cssPrefix+"-transform",!0;return!1
}(),"vertical"==o.settings.mode&&(o.settings.maxSlides=o.settings.minSlides),r.data("origStyle",r.attr("style")),r.children(o.settings.slideSelector).each(function(){
t(this).data("origStyle",t(this).attr("style"))
}),c()
},c=function(){
r.wrap('<div class="bx-wrapper"><div class="bx-viewport"></div></div>'),o.viewport=r.parent(),o.loader=t('<div class="bx-loading" />'),o.viewport.prepend(o.loader),r.css({
width:"horizontal"==o.settings.mode?100*o.children.length+215+"%":"auto",
position:"relative"
}),o.usingCSS&&o.settings.easing?r.css("-"+o.cssPrefix+"-transition-timing-function",o.settings.easing):o.settings.easing||(o.settings.easing="swing"),f(),o.viewport.css({
width:"100%",
overflow:"hidden",
position:"relative"
}),o.viewport.parent().css({
maxWidth:p()
}),o.settings.pager||o.viewport.parent().css({
margin:"0 auto 0px"
}),o.children.css({
"float":"horizontal"==o.settings.mode?"left":"none",
listStyle:"none",
position:"relative"
}),o.children.css("width",u()),"horizontal"==o.settings.mode&&o.settings.slideMargin>0&&o.children.css("marginRight",o.settings.slideMargin),"vertical"==o.settings.mode&&o.settings.slideMargin>0&&o.children.css("marginBottom",o.settings.slideMargin),"fade"==o.settings.mode&&(o.children.css({
position:"absolute",
zIndex:0,
display:"none"
}),o.children.eq(o.settings.startSlide).css({
zIndex:o.settings.slideZIndex,
display:"block"
})),o.controls.el=t('<div class="bx-controls" />'),o.settings.captions&&P(),o.active.last=o.settings.startSlide==x()-1,o.settings.video&&r.fitVids();
var e=o.children.eq(o.settings.startSlide);
"all"==o.settings.preloadImages&&(e=o.children),o.settings.ticker?o.settings.pager=!1:(o.settings.pager&&T(),o.settings.controls&&C(),o.settings.auto&&o.settings.autoControls&&E(),(o.settings.controls||o.settings.autoControls||o.settings.pager)&&o.viewport.after(o.controls.el)),g(e,h)
},g=function(e,i){
var s=e.find("img, iframe").length;
if(0==s)return i(),void 0;
var n=0;
e.find("img, iframe").each(function(){
t(this).one("load",function(){
++n==s&&i()
}).each(function(){
this.complete&&t(this).load()
})
})
},h=function(){
if(o.settings.infiniteLoop&&"fade"!=o.settings.mode&&!o.settings.ticker){
var e="vertical"==o.settings.mode?o.settings.minSlides:o.settings.maxSlides,i=o.children.slice(0,e).clone().addClass("bx-clone"),s=o.children.slice(-e).clone().addClass("bx-clone");
r.append(i).prepend(s)
}
o.loader.remove(),S(),"vertical"==o.settings.mode&&(o.settings.adaptiveHeight=!0),o.viewport.height(v()),r.redrawSlider(),o.settings.onSliderLoad(o.active.index),o.initialized=!0,o.settings.responsive&&t(window).bind("resize",Z),o.settings.auto&&o.settings.autoStart&&H(),o.settings.ticker&&L(),o.settings.pager&&q(o.settings.startSlide),o.settings.controls&&W(),o.settings.touchEnabled&&!o.settings.ticker&&O()
},v=function(){
var e=0,s=t();
if("vertical"==o.settings.mode||o.settings.adaptiveHeight)if(o.carousel){
var n=1==o.settings.moveSlides?o.active.index:o.active.index*m();
for(s=o.children.eq(n),i=1;i<=o.settings.maxSlides-1;i++)s=n+i>=o.children.length?s.add(o.children.eq(i-1)):s.add(o.children.eq(n+i))
}else s=o.children.eq(o.active.index);else s=o.children;
return"vertical"==o.settings.mode?(s.each(function(){
e+=t(this).outerHeight()
}),o.settings.slideMargin>0&&(e+=o.settings.slideMargin*(o.settings.minSlides-1))):e=Math.max.apply(Math,s.map(function(){
return t(this).outerHeight(!1)
}).get()),e
},p=function(){
var t="100%";
return o.settings.slideWidth>0&&(t="horizontal"==o.settings.mode?o.settings.maxSlides*o.settings.slideWidth+(o.settings.maxSlides-1)*o.settings.slideMargin:o.settings.slideWidth),t
},u=function(){
var t=o.settings.slideWidth,e=o.viewport.width();
return 0==o.settings.slideWidth||o.settings.slideWidth>e&&!o.carousel||"vertical"==o.settings.mode?t=e:o.settings.maxSlides>1&&"horizontal"==o.settings.mode&&(e>o.maxThreshold||e<o.minThreshold&&(t=(e-o.settings.slideMargin*(o.settings.minSlides-1))/o.settings.minSlides)),t
},f=function(){
var t=1;
if("horizontal"==o.settings.mode&&o.settings.slideWidth>0)if(o.viewport.width()<o.minThreshold)t=o.settings.minSlides;
else if(o.viewport.width()>o.maxThreshold)t=o.settings.maxSlides;
else{
var e=o.children.first().width();
t=Math.floor(o.viewport.width()/e)
}else"vertical"==o.settings.mode&&(t=o.settings.minSlides);
return t
},x=function(){
var t=0;
if(o.settings.moveSlides>0)if(o.settings.infiniteLoop)t=o.children.length/m();else for(var e=0,i=0;e<o.children.length;)++t,e=i+f(),i+=o.settings.moveSlides<=f()?o.settings.moveSlides:f();else t=Math.ceil(o.children.length/f());
return t
},m=function(){
return o.settings.moveSlides>0&&o.settings.moveSlides<=f()?o.settings.moveSlides:f()
},S=function(){
if(o.children.length>o.settings.maxSlides&&o.active.last&&!o.settings.infiniteLoop){
if("horizontal"==o.settings.mode){
var t=o.children.last(),e=t.position();
b(-(e.left-(o.viewport.width()-t.width())),"reset",0)
}else if("vertical"==o.settings.mode){
var i=o.children.length-o.settings.minSlides,e=o.children.eq(i).position();
b(-e.top,"reset",0)
}
}else{
var e=o.children.eq(o.active.index*m()).position();
o.active.index==x()-1&&(o.active.last=!0),void 0!=e&&("horizontal"==o.settings.mode?b(-e.left,"reset",0):"vertical"==o.settings.mode&&b(-e.top,"reset",0))
}
},b=function(t,e,i,s){
if(o.usingCSS){
var n="vertical"==o.settings.mode?"translate3d(0, "+t+"px, 0)":"translate3d("+t+"px, 0, 0)";
r.css("-"+o.cssPrefix+"-transition-duration",i/1e3+"s"),"slide"==e?(r.css(o.animProp,n),r.bind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd",function(){
r.unbind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd"),D()
})):"reset"==e?r.css(o.animProp,n):"ticker"==e&&(r.css("-"+o.cssPrefix+"-transition-timing-function","linear"),r.css(o.animProp,n),r.bind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd",function(){
r.unbind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd"),b(s.resetValue,"reset",0),N()
}))
}else{
var a={};
a[o.animProp]=t,"slide"==e?r.animate(a,i,o.settings.easing,function(){
D()
}):"reset"==e?r.css(o.animProp,t):"ticker"==e&&r.animate(a,speed,"linear",function(){
b(s.resetValue,"reset",0),N()
})
}
},w=function(){
for(var e="",i=x(),s=0;i>s;s++){
var n="";
o.settings.buildPager&&t.isFunction(o.settings.buildPager)?(n=o.settings.buildPager(s),o.pagerEl.addClass("bx-custom-pager")):(n=s+1,o.pagerEl.addClass("bx-default-pager")),e+='<div class="bx-pager-item"><a href="" data-slide-index="'+s+'" class="bx-pager-link" style="overflow: hidden">'+n+"</a></div>"
}
o.pagerEl.html(e)
},T=function(){
o.settings.pagerCustom?o.pagerEl=t(o.settings.pagerCustom):(o.pagerEl=t('<div class="bx-pager" />'),o.settings.pagerSelector?t(o.settings.pagerSelector).html(o.pagerEl):o.controls.el.addClass("bx-has-pager").append(o.pagerEl),w()),o.pagerEl.on("click","a",I)
},C=function(){
o.controls.next=t('<a class="bx-next" href="">'+o.settings.nextText+"</a>"),o.controls.prev=t('<a class="bx-prev" href="">'+o.settings.prevText+"</a>"),o.controls.next.bind("click",y),o.controls.prev.bind("click",z),o.settings.nextSelector&&t(o.settings.nextSelector).append(o.controls.next),o.settings.prevSelector&&t(o.settings.prevSelector).append(o.controls.prev),o.settings.nextSelector||o.settings.prevSelector||(o.controls.directionEl=t('<div class="bx-controls-direction" />'),o.controls.directionEl.append(o.controls.prev).append(o.controls.next),o.controls.el.addClass("bx-has-controls-direction").append(o.controls.directionEl))
},E=function(){
o.controls.start=t('<div class="bx-controls-auto-item"><a class="bx-start" href="">'+o.settings.startText+"</a></div>"),o.controls.stop=t('<div class="bx-controls-auto-item"><a class="bx-stop" href="">'+o.settings.stopText+"</a></div>"),o.controls.autoEl=t('<div class="bx-controls-auto" />'),o.controls.autoEl.on("click",".bx-start",k),o.controls.autoEl.on("click",".bx-stop",M),o.settings.autoControlsCombine?o.controls.autoEl.append(o.controls.start):o.controls.autoEl.append(o.controls.start).append(o.controls.stop),o.settings.autoControlsSelector?t(o.settings.autoControlsSelector).html(o.controls.autoEl):o.controls.el.addClass("bx-has-controls-auto").append(o.controls.autoEl),A(o.settings.autoStart?"stop":"start")
},P=function(){
o.children.each(function(){
var e=t(this).find("img:first").attr("title");
void 0!=e&&(""+e).length&&t(this).append('<div class="bx-caption"><span>'+e+"</span></div>")
})
},y=function(t){
o.settings.auto&&r.stopAuto(),r.goToNextSlide(),t.preventDefault()
},z=function(t){
o.settings.auto&&r.stopAuto(),r.goToPrevSlide(),t.preventDefault()
},k=function(t){
r.startAuto(),t.preventDefault()
},M=function(t){
r.stopAuto(),t.preventDefault()
},I=function(e){
o.settings.auto&&r.stopAuto();
var i=t(e.currentTarget),s=parseInt(i.attr("data-slide-index"));
s!=o.active.index&&r.goToSlide(s),e.preventDefault()
},q=function(e){
var i=o.children.length;
return"short"==o.settings.pagerType?(o.settings.maxSlides>1&&(i=Math.ceil(o.children.length/o.settings.maxSlides)),o.pagerEl.html(e+1+o.settings.pagerShortSeparator+i),void 0):(o.pagerEl.find("a").removeClass("active"),o.pagerEl.each(function(i,s){
t(s).find("a").eq(e).addClass("active")
}),void 0)
},D=function(){
if(o.settings.infiniteLoop){
var t="";
0==o.active.index?t=o.children.eq(0).position():o.active.index==x()-1&&o.carousel?t=o.children.eq((x()-1)*m()).position():o.active.index==o.children.length-1&&(t=o.children.eq(o.children.length-1).position()),t&&("horizontal"==o.settings.mode?b(-t.left,"reset",0):"vertical"==o.settings.mode&&b(-t.top,"reset",0))
}
o.working=!1,o.settings.onSlideAfter(o.children.eq(o.active.index),o.oldIndex,o.active.index)
},A=function(t){
o.settings.autoControlsCombine?o.controls.autoEl.html(o.controls[t]):(o.controls.autoEl.find("a").removeClass("active"),o.controls.autoEl.find("a:not(.bx-"+t+")").addClass("active"))
},W=function(){
1==x()?(o.controls.prev.addClass("disabled"),o.controls.next.addClass("disabled")):!o.settings.infiniteLoop&&o.settings.hideControlOnEnd&&(0==o.active.index?(o.controls.prev.addClass("disabled"),o.controls.next.removeClass("disabled")):o.active.index==x()-1?(o.controls.next.addClass("disabled"),o.controls.prev.removeClass("disabled")):(o.controls.prev.removeClass("disabled"),o.controls.next.removeClass("disabled")))
},H=function(){
o.settings.autoDelay>0?setTimeout(r.startAuto,o.settings.autoDelay):r.startAuto(),o.settings.autoHover&&r.hover(function(){
o.interval&&(r.stopAuto(!0),o.autoPaused=!0)
},function(){
o.autoPaused&&(r.startAuto(!0),o.autoPaused=null)
})
},L=function(){
var e=0;
if("next"==o.settings.autoDirection)r.append(o.children.clone().addClass("bx-clone"));
else{
r.prepend(o.children.clone().addClass("bx-clone"));
var i=o.children.first().position();
e="horizontal"==o.settings.mode?-i.left:-i.top
}
b(e,"reset",0),o.settings.pager=!1,o.settings.controls=!1,o.settings.autoControls=!1,o.settings.tickerHover&&!o.usingCSS&&o.viewport.hover(function(){
r.stop()
},function(){
var e=0;
o.children.each(function(){
e+="horizontal"==o.settings.mode?t(this).outerWidth(!0):t(this).outerHeight(!0)
});
var i=o.settings.speed/e,s="horizontal"==o.settings.mode?"left":"top",n=i*(e-Math.abs(parseInt(r.css(s))));
N(n)
}),N()
},N=function(t){
speed=t?t:o.settings.speed;
var e={
left:0,
top:0
},i={
left:0,
top:0
};
"next"==o.settings.autoDirection?e=r.find(".bx-clone").first().position():i=o.children.first().position();
var s="horizontal"==o.settings.mode?-e.left:-e.top,n="horizontal"==o.settings.mode?-i.left:-i.top,a={
resetValue:n
};
b(s,"ticker",speed,a)
},O=function(){
o.touch={
start:{
x:0,
y:0
},
end:{
x:0,
y:0
}
},o.viewport.bind("touchstart",X)
},X=function(t){
if(o.working)t.preventDefault();
else{
o.touch.originalPos=r.position();
var e=t.originalEvent;
o.touch.start.x=e.changedTouches[0].pageX,o.touch.start.y=e.changedTouches[0].pageY,o.viewport.bind("touchmove",Y),o.viewport.bind("touchend",V)
}
},Y=function(t){
var e=t.originalEvent,i=Math.abs(e.changedTouches[0].pageX-o.touch.start.x),s=Math.abs(e.changedTouches[0].pageY-o.touch.start.y);
if(3*i>s&&o.settings.preventDefaultSwipeX?t.preventDefault():3*s>i&&o.settings.preventDefaultSwipeY&&t.preventDefault(),"fade"!=o.settings.mode&&o.settings.oneToOneTouch){
var n=0;
if("horizontal"==o.settings.mode){
var r=e.changedTouches[0].pageX-o.touch.start.x;
n=o.touch.originalPos.left+r
}else{
var r=e.changedTouches[0].pageY-o.touch.start.y;
n=o.touch.originalPos.top+r
}
b(n,"reset",0)
}
},V=function(t){
o.viewport.unbind("touchmove",Y);
var e=t.originalEvent,i=0;
if(o.touch.end.x=e.changedTouches[0].pageX,o.touch.end.y=e.changedTouches[0].pageY,"fade"==o.settings.mode){
var s=Math.abs(o.touch.start.x-o.touch.end.x);
s>=o.settings.swipeThreshold&&(o.touch.start.x>o.touch.end.x?r.goToNextSlide():r.goToPrevSlide(),r.stopAuto())
}else{
var s=0;
"horizontal"==o.settings.mode?(s=o.touch.end.x-o.touch.start.x,i=o.touch.originalPos.left):(s=o.touch.end.y-o.touch.start.y,i=o.touch.originalPos.top),!o.settings.infiniteLoop&&(0==o.active.index&&s>0||o.active.last&&0>s)?b(i,"reset",200):Math.abs(s)>=o.settings.swipeThreshold?(0>s?r.goToNextSlide():r.goToPrevSlide(),r.stopAuto()):b(i,"reset",200)
}
o.viewport.unbind("touchend",V)
},Z=function(){
var e=t(window).width(),i=t(window).height();
(a!=e||l!=i)&&(a=e,l=i,r.redrawSlider(),o.settings.onSliderResize.call(r,o.active.index))
};
return r.goToSlide=function(e,i){
if(!o.working&&o.active.index!=e)if(o.working=!0,o.oldIndex=o.active.index,o.active.index=0>e?x()-1:e>=x()?0:e,o.settings.onSlideBefore(o.children.eq(o.active.index),o.oldIndex,o.active.index),"next"==i?o.settings.onSlideNext(o.children.eq(o.active.index),o.oldIndex,o.active.index):"prev"==i&&o.settings.onSlidePrev(o.children.eq(o.active.index),o.oldIndex,o.active.index),o.active.last=o.active.index>=x()-1,o.settings.pager&&q(o.active.index),o.settings.controls&&W(),"fade"==o.settings.mode)o.settings.adaptiveHeight&&o.viewport.height()!=v()&&o.viewport.animate({
height:v()
},o.settings.adaptiveHeightSpeed),o.children.filter(":visible").fadeOut(o.settings.speed).css({
zIndex:0
}),o.children.eq(o.active.index).css("zIndex",o.settings.slideZIndex+1).fadeIn(o.settings.speed,function(){
t(this).css("zIndex",o.settings.slideZIndex),D()
});
else{
o.settings.adaptiveHeight&&o.viewport.height()!=v()&&o.viewport.animate({
height:v()
},o.settings.adaptiveHeightSpeed);
var s=0,n={
left:0,
top:0
};
if(!o.settings.infiniteLoop&&o.carousel&&o.active.last)if("horizontal"==o.settings.mode){
var a=o.children.eq(o.children.length-1);
n=a.position(),s=o.viewport.width()-a.outerWidth()
}else{
var l=o.children.length-o.settings.minSlides;
n=o.children.eq(l).position()
}else if(o.carousel&&o.active.last&&"prev"==i){
var d=1==o.settings.moveSlides?o.settings.maxSlides-m():(x()-1)*m()-(o.children.length-o.settings.maxSlides),a=r.children(".bx-clone").eq(d);
n=a.position()
}else if("next"==i&&0==o.active.index)n=r.find("> .bx-clone").eq(o.settings.maxSlides).position(),o.active.last=!1;
else if(e>=0){
var c=e*m();
n=o.children.eq(c).position()
}
if("undefined"!=typeof n){
var g="horizontal"==o.settings.mode?-(n.left-s):-n.top;
b(g,"slide",o.settings.speed)
}
}
},r.goToNextSlide=function(){
if(o.settings.infiniteLoop||!o.active.last){
var t=parseInt(o.active.index)+1;
r.goToSlide(t,"next")
}
},r.goToPrevSlide=function(){
if(o.settings.infiniteLoop||0!=o.active.index){
var t=parseInt(o.active.index)-1;
r.goToSlide(t,"prev")
}
},r.startAuto=function(t){
o.interval||(o.interval=setInterval(function(){
"next"==o.settings.autoDirection?r.goToNextSlide():r.goToPrevSlide()
},o.settings.pause),o.settings.autoControls&&1!=t&&A("stop"))
},r.stopAuto=function(t){
o.interval&&(clearInterval(o.interval),o.interval=null,o.settings.autoControls&&1!=t&&A("start"))
},r.getCurrentSlide=function(){
return o.active.index
},r.getCurrentSlideElement=function(){
return o.children.eq(o.active.index)
},r.getSlideCount=function(){
return o.children.length
},r.redrawSlider=function(){
o.children.add(r.find(".bx-clone")).outerWidth(u()),o.viewport.css("height",v()),o.settings.ticker||S(),o.active.last&&(o.active.index=x()-1),o.active.index>=x()&&(o.active.last=!0),o.settings.pager&&!o.settings.pagerCustom&&(w(),q(o.active.index))
},r.destroySlider=function(){
o.initialized&&(o.initialized=!1,t(".bx-clone",this).remove(),o.children.each(function(){
void 0!=t(this).data("origStyle")?t(this).attr("style",t(this).data("origStyle")):t(this).removeAttr("style")
}),void 0!=t(this).data("origStyle")?this.attr("style",t(this).data("origStyle")):t(this).removeAttr("style"),t(this).unwrap().unwrap(),o.controls.el&&o.controls.el.remove(),o.controls.next&&o.controls.next.remove(),o.controls.prev&&o.controls.prev.remove(),o.pagerEl&&o.settings.controls&&o.pagerEl.remove(),t(".bx-caption",this).remove(),o.controls.autoEl&&o.controls.autoEl.remove(),clearInterval(o.interval),o.settings.responsive&&t(window).unbind("resize",Z))
},r.reloadSlider=function(t){
void 0!=t&&(n=t),r.destroySlider(),d()
},d(),this
}
}(jQuery);
<|file_name|>km.js<|end_file_name|>
OC.L10N.register(
"passman",
{
"General" : "ទូទៅ",
"Done" : "Done",
"Sharing" : "ការចែករំលែក",
"Share link" : "Share link",
"Username" : "ឈ្មោះអ្នកប្រើ",
"File" : "File",
"Add" : "បញ្ចូល",
"Type" : "Type",
"Size" : "ទំហំ",
"Expiration date" : "ពេលផុតកំណត់",
"Disabled" : "បានបិទ",<|fim▁hole|> "Uploading" : "Uploading",
"User" : "User",
"Files" : "ឯកសារ",
"Pending" : "កំពុងរង់ចាំ",
"Details" : "ព័ត៌មានលម្អិត",
"by" : "ដោយ",
"Save" : "រក្សាទុក",
"Cancel" : "បោះបង់",
"Settings" : "ការកំណត់",
"Unshare" : "លែងចែករំលែក",
"Password" : "ពាក្យសម្ងាត់",
"URL" : "URL",
"Notes" : "កំណត់ចំណាំ",
"Edit" : "កែប្រែ",
"Delete" : "លុប",
"Share" : "ចែករំលែក",
"Date" : "Date",
"Tags" : "ស្លាក",
"Description" : "ការអធិប្បាយ",
"You created %1$s" : "អ្នកបានបង្កើត %1$s",
"You deleted %1$s" : "អ្នកបានលុប %1$s",
"seconds ago" : "វិនាទីមុន"
},
"nplurals=1; plural=0;");<|fim▁end|> | "Export" : "នាំចេញ",
"Version" : "កំណែ",
"Import" : "នាំយកចូល", |
<|file_name|>iterable.rs<|end_file_name|>
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(generic_associated_types)]
//~^ WARNING the feature `generic_associated_types` is incomplete
use std::ops::Deref;
// FIXME(#44265): "lifetime parameters are not allowed on this type" errors will be addressed in a
// follow-up PR.
trait Iterable {
type Item<'a>;
type Iter<'a>: Iterator<Item = Self::Item<'a>>;
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
fn iter<'a>(&'a self) -> Self::Iter<'a>;
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
}
// Impl for struct type
impl<T> Iterable for Vec<T> {
    type Item<'a> = &'a T;
type Iter<'a> = std::slice::Iter<'a, T>;
fn iter<'a>(&'a self) -> Self::Iter<'a> {
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
self.iter()
}
}
// Impl for a primitive type
impl<T> Iterable for [T] {
type Item<'a> = &'a T;
type Iter<'a> = std::slice::Iter<'a, T>;
fn iter<'a>(&'a self) -> Self::Iter<'a> {
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
self.iter()
}
}
fn make_iter<'a, I: Iterable>(it: &'a I) -> I::Iter<'a> {
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
it.iter()
}
fn get_first<'a, I: Iterable>(it: &'a I) -> Option<I::Item<'a>> {
//~^ ERROR lifetime parameters are not allowed on this type [E0110]
it.iter().next()
}
fn main() {}
<|file_name|>utf8.cpp<|end_file_name|>
#include "utf8.hpp"
#include "utf8/utf8.h"
#include <cstring>
namespace core
{
UTF8String::UTF8String()
{}
UTF8String::UTF8String(const UTF8String& cp)
: m_src(cp.m_src)
{}
UTF8String::UTF8String(const std::string& src)
: m_src(src)
{}
UTF8String::~UTF8String()
{
}
size_t UTF8String::size() const
{
utf8::iterator<std::string::const_iterator> it(m_src.cbegin(), m_src.cbegin(), m_src.cend());
utf8::iterator<std::string::const_iterator> end(m_src.cend(), m_src.cbegin(), m_src.cend());
size_t count = 0;
while(it != end) {
++count;
++it;
}
return count;
}
void UTF8String::clear()
{
m_src.clear();
}
bool UTF8String::empty() const
{
return m_src.empty();
}
bool UTF8String::valid() const
{
return utf8::is_valid(m_src.begin(), m_src.end());
}
void UTF8String::removeErrors()
{
std::string temp;
utf8::replace_invalid(m_src.begin(), m_src.end(), std::back_inserter(temp));
m_src = temp;
}
UTF8String& UTF8String::operator=(const UTF8String& cp)
{
m_src = cp.m_src;
return *this;
}
std::string UTF8String::getSrc() const
{
return m_src;
}
UTF8String::operator std::string() const
{
return getSrc();
}
unsigned int UTF8String::operator[](size_t idx) const
{
utf8::iterator<std::string::const_iterator> it(m_src.cbegin(), m_src.cbegin(), m_src.cend());
for(size_t i = 0; i < idx; ++i)
++it;
return *it;
}
bool operator==(const UTF8String& s1, const UTF8String& s2)
{
return s1.getSrc() == s2.getSrc();
}
std::ostream& operator<<(std::ostream& os, const UTF8String& str)
{
os << str.getSrc();
return os;
}
}
<|file_name|>MetricSource.java<|end_file_name|>
package eu.uqasar.model.measure;
/*
* #%L
* U-QASAR
* %%
* Copyright (C) 2012 - 2015 U-QASAR Consortium
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.wicket.model.IModel;
import eu.uqasar.util.resources.IResourceKeyProvider;
import eu.uqasar.util.resources.ResourceBundleLocator;
public enum MetricSource implements IResourceKeyProvider {
TestingFramework("test"),
IssueTracker("issue"),
StaticAnalysis("static"),
ContinuousIntegration("ci"),
CubeAnalysis("cube"),
VersionControl("vcs"),
Manual("manual"),
;
private final String labelKey;
MetricSource(final String labelKey) {
this.labelKey = labelKey;
}
public String toString() {
return getLabelModel().getObject();
}
    public IModel<String> getLabelModel() {
        return ResourceBundleLocator.getLabelModel(this.getClass(), this);
    }
@Override
public String getKey() {
return "label.source." + this.labelKey;
}
public static List<MetricSource> getAllMetricSources(){
List<MetricSource> list = new ArrayList<>();
Collections.addAll(list, MetricSource.values());
return list;
}
}<|fim▁end|> |
public IModel<String> getLabelModel() {
return ResourceBundleLocator.getLabelModel(this.getClass(), this);
}
|
<|file_name|>building_block_factory.py<|end_file_name|><|fim▁begin|># Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of construction functions for building block structures."""
import functools
import random
import string
from typing import AbstractSet, Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.compiler import tensorflow_computation_factory
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.types import type_transformations
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
Index = Union[str, int]
Path = Union[Index, Tuple[Index, ...]]
def select_output_from_lambda(
comp: building_blocks.Lambda,
paths: Union[Path, List[Path]]) -> building_blocks.Lambda:
"""Constructs a new function with result of selecting `paths` from `comp`.
Args:
comp: Lambda computation with result type `tff.StructType` from which we
wish to select the sub-results at `paths`.
paths: Either a `Path` or list of `Path`s specifying the indices we wish to
select from the result of `comp`. Each path must be a `tuple` of `str` or
`int` indices from which to select an output. If `paths` is a list, the
returned computation will have a `tff.StructType` result holding each of
the specified selections.
Returns:
A version of `comp` with result value the selection from the result of
`comp` specified by `paths`.
"""
comp.check_lambda()
comp.type_signature.result.check_struct()
def _select_path(result, path: Path):
if not isinstance(path, tuple):
path = (path,)
for index in path:
if result.is_struct():
result = result[index]
elif isinstance(index, str):
result = building_blocks.Selection(result, name=index)
elif isinstance(index, int):
result = building_blocks.Selection(result, index=index)
else:
raise TypeError('Invalid selection type: expected `str` or `int`, '
f'found value `{index}` of type `{type(index)}`.')
return result
if isinstance(paths, list):
# Avoid duplicating `comp.result` by binding it to a local.
result_name = next(unique_name_generator(comp))
result_ref = building_blocks.Reference(result_name,
comp.result.type_signature)
elements = [_select_path(result_ref, path) for path in paths]
result = building_blocks.Block([(result_name, comp.result)],
building_blocks.Struct(elements))
else:
result = _select_path(comp.result, paths)
return building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
result)
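# Illustrative sketch (not part of the original module): assuming a hypothetical
# lambda `lam` whose result type is a struct `<x=..., y=<a=..., b=...>>`,
#   select_output_from_lambda(lam, [0, (1, 'b')])
# would return a lambda whose result is the struct `<lam.result[0], lam.result[1].b>`.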
def unique_name_generator(comp: building_blocks.ComputationBuildingBlock,
prefix: str = '_var') -> Iterator[str]:
"""Yields a new unique name that does not exist in `comp`.
Args:
    comp: The computation building block to use as a reference.
prefix: The prefix to use when generating unique names. If `prefix` is
`None` or if `comp` contains any name with this prefix, then a unique
prefix will be generated from random lowercase ascii characters.
"""
if comp is not None:
names = transformation_utils.get_unique_names(comp)
else:
names = set()
while prefix is None or any(n.startswith(prefix) for n in names):
characters = string.ascii_lowercase
prefix = '_{}'.format(''.join(random.choice(characters) for _ in range(3)))
index = 1
while True:
yield '{}{}'.format(prefix, index)
index += 1
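# Illustrative sketch: drawing fresh names that do not collide with an existing
# computation (`some_comp` is a hypothetical building block):
#   name_gen = unique_name_generator(some_comp)
#   first_name = next(name_gen)   # e.g. '_var1', assuming the '_var' prefix is unused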
@functools.lru_cache()
def create_compiled_no_arg_empty_tuple_computation(
) -> building_blocks.CompiledComputation:
"""Returns graph representing a function that returns an empty tuple.
Returns:
An instance of `building_blocks.CompiledComputation`, a noarg function
which returns an empty tuple.
"""
proto, type_signature = tensorflow_computation_factory.create_empty_tuple()
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
@functools.lru_cache()
def create_compiled_empty_tuple() -> building_blocks.Call:
"""Returns called graph representing the empty tuple.
Returns:
An instance of `building_blocks.Call`, calling a noarg function
which returns an empty tuple. This function is an instance of
`building_blocks.CompiledComputation`.
"""
compiled = create_compiled_no_arg_empty_tuple_computation()
return building_blocks.Call(compiled, None)
@functools.lru_cache()
def create_identity(
type_signature: computation_types.Type) -> building_blocks.Lambda:
return building_blocks.Lambda(
'id_arg', type_signature,
building_blocks.Reference('id_arg', type_signature))
@functools.lru_cache()
def create_compiled_identity(
type_signature: computation_types.Type,
name: Optional[str] = None) -> building_blocks.CompiledComputation:
"""Creates CompiledComputation representing identity function.
Args:
type_signature: A `computation_types.Type`.
name: An optional string name to use as the name of the computation.
Returns:
An instance of `building_blocks.CompiledComputation`
representing the identity function taking an argument of type
`type_signature` and returning the same value.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings.
"""
proto, function_type = tensorflow_computation_factory.create_identity(
type_signature)
return building_blocks.CompiledComputation(
proto, name, type_signature=function_type)
class SelectionSpec(object):
"""Data class representing map from input tuple to selection of result.
Attributes:
tuple_index: The index of the source of the selection sequence in the
desired result of the generated TensorFlow. If this `SelectionSpec`
      appears at index i of a list of `SelectionSpec`s, its `tuple_index` j is
      the source for element i of the result of the generated function.
    selection_sequence: A list or tuple representing the selections to make from
      `tuple_index`, so that, for example, the list `[0]` would indicate that the
      output is the 0th element of `tuple_index`, while `[0, 0]` would indicate
      that the output is the 0th element of the 0th element of `tuple_index`.
"""
def __init__(self, tuple_index: int, selection_sequence: Sequence[int]):
self._tuple_index = tuple_index
self._selection_sequence = selection_sequence
@property
def tuple_index(self):
return self._tuple_index
@property
def selection_sequence(self):
return self._selection_sequence
def __str__(self):
return 'SelectionSequence(tuple_index={},selection_sequence={}'.format(
self._tuple_index, self._selection_sequence)
def __repr__(self):
return str(self)
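# Illustrative reading of the data class above: SelectionSpec(tuple_index=1,
# selection_sequence=[0, 2]) means "take element 1 of the input tuple, then its
# element 0, then that element's element 2" as one output of the generated graph.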
def _extract_selections(parameter_value, output_spec):
results = []
for selection_spec in output_spec:
result_element = parameter_value[selection_spec.tuple_index]
for selection in selection_spec.selection_sequence:
py_typecheck.check_type(selection, int)
result_element = result_element[selection]
results.append(result_element)
return results
@functools.lru_cache()
def construct_tensorflow_selecting_and_packing_outputs(
parameter_type: computation_types.StructType,
output_structure: structure.Struct) -> building_blocks.CompiledComputation:
"""Constructs TensorFlow selecting and packing elements from its input.
The result of this function can be called on a deduplicated
`building_blocks.Struct` containing called graphs, thus preventing us from
embedding the same TensorFlow computation in the generated graphs, and
reducing the amount of work duplicated in the process of generating
TensorFlow.
  The TensorFlow which results here will be a function which takes an argument
  of type `parameter_type`, returning a result specified by `output_structure`.
  Each `SelectionSpec` nested inside of `output_structure` will represent a
  selection from one of the arguments of the tuple `parameter_type`, with the
  empty selection being a possibility. The nested structure of `output_structure`
  will determine how these selections are packed back into a result, i.e., the
  result of the function will be a nested tuple with the same structure as
  `output_structure`, where the leaves of this structure (the `SelectionSpec`s of
  `output_structure`) will be selections from the argument.
Args:
parameter_type: A `computation_types.StructType` of the argument on which
the constructed function will be called.
output_structure: `structure.Struct` with `SelectionSpec` or
      `structure.Struct` elements, mapping from elements of the nested
argument tuple to the desired result of the generated computation.
`output_structure` must contain all the names desired on the output of the
computation.
Returns:
A `building_blocks.CompiledComputation` representing the specification
above.
Raises:
    TypeError: If `parameter_type` is not a `computation_types.StructType`, or
      represents a type which cannot act as an input or output to a TensorFlow
      computation in TFF, i.e., does not contain exclusively
`computation_types.SequenceType`, `computation_types.StructType` or
`computation_types.TensorType`.
"""
py_typecheck.check_type(parameter_type, computation_types.StructType)
py_typecheck.check_type(output_structure, structure.Struct)
def _check_output_structure(elem):
if isinstance(elem, structure.Struct):
for x in elem:
_check_output_structure(x)
elif not isinstance(elem, SelectionSpec):
raise TypeError('output_structure can only contain nested anonymous '
'tuples and `SelectionSpecs`; encountered the value {} '
'of type {}.'.format(elem, type(elem)))
_check_output_structure(output_structure)
output_spec = structure.flatten(output_structure)
type_analysis.check_tensorflow_compatible_type(parameter_type)
with tf.Graph().as_default() as graph:
parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', parameter_type, graph)
results = _extract_selections(parameter_value, output_spec)
repacked_result = structure.pack_sequence_as(output_structure, results)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
repacked_result, graph)
function_type = computation_types.FunctionType(parameter_type, result_type)
serialized_function_type = type_serialization.serialize_type(function_type)
proto = pb.Computation(
type=serialized_function_type,
tensorflow=pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding))
return building_blocks.CompiledComputation(
proto, type_signature=function_type)
@functools.lru_cache()
def create_tensorflow_constant(type_spec: computation_types.Type,
scalar_value: Union[int, float, str],
name=None) -> building_blocks.Call:
"""Creates called graph returning constant `scalar_value` of type `type_spec`.
`scalar_value` must be a scalar, and cannot be a float if any of the tensor
leaves of `type_spec` contain an integer data type. `type_spec` must contain
only named tuples and tensor types, but these can be arbitrarily nested.
Args:
type_spec: A `computation_types.Type` whose resulting type tree can only
contain named tuples and tensors.
scalar_value: Scalar value to place in all the tensor leaves of `type_spec`.
name: An optional string name to use as the name of the computation.
Returns:
An instance of `building_blocks.Call`, whose argument is `None`
and whose function is a noarg
`building_blocks.CompiledComputation` which returns the
specified `scalar_value` packed into a TFF structure of type `type_spec.
Raises:
TypeError: If the type assumptions above are violated.
"""
proto, function_type = tensorflow_computation_factory.create_constant(
scalar_value, type_spec)
compiled = building_blocks.CompiledComputation(
proto, name, type_signature=function_type)
return building_blocks.Call(compiled, None)
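# Illustrative sketch: a called graph producing the float 1.0 packed into a
# two-tensor struct. The constructors used here are an assumption about the
# usual `computation_types` API, not taken from this module:
#   const_type = computation_types.StructType(
#       [computation_types.TensorType(tf.float32)] * 2)
#   one = create_tensorflow_constant(const_type, 1.0)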
@functools.lru_cache()
def create_compiled_input_replication(
type_signature: computation_types.Type,
n_replicas: int) -> building_blocks.CompiledComputation:
"""Creates a compiled computation which replicates its argument.
Args:
type_signature: A `computation_types.Type`, the type of the parameter of the
constructed computation.
n_replicas: Integer, the number of times the argument is intended to be
replicated.
Returns:
An instance of `building_blocks.CompiledComputation` encoding
    a function taking a single argument of type `type_signature` and returning
`n_replicas` identical copies of this argument.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings, or if `n_replicas` is not an integer.
"""
proto, comp_type = tensorflow_computation_factory.create_replicate_input(
type_signature, n_replicas)
return building_blocks.CompiledComputation(proto, type_signature=comp_type)
def create_tensorflow_unary_operator(
operator: Callable[[Any], Any], operand_type: computation_types.Type
) -> building_blocks.CompiledComputation:
"""Creates a TensorFlow computation for the unary `operator`.
For `T` the `operand_type`, the type signature of the constructed operator
will be `(T -> U)`, where `U` is the result of applying `operator` to
a value of type `T`.
Notice that we have quite serious restrictions on `operand_type` here; not
only must it be compatible with stamping into a TensorFlow graph, but
additionally cannot contain a `computation_types.SequenceType`, as checked by
`type_analysis.is_generic_op_compatible_type`.
Args:
operator: Callable taking one argument specifying the operation to encode.
For example, `tf.math.abs`, `tf.math.reduce_sum`, ...
operand_type: The type of argument to the constructed unary operator. Must
be convertible to `computation_types.Type`.
Returns:
Instance of `building_blocks.CompiledComputation` encoding this unary
operator.
Raises:
TypeError: If the type tree of `operand_type` contains any type which is
incompatible with the TFF generic operators, as checked by
`type_analysis.is_generic_op_compatible_type`, or `operator` is not
callable.
"""
proto, type_signature = tensorflow_computation_factory.create_unary_operator(
operator, operand_type)
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
def create_tensorflow_binary_operator(
operator: Callable[[Any, Any], Any],
operand_type: computation_types.Type,
second_operand_type: Optional[computation_types.Type] = None
) -> building_blocks.CompiledComputation:
"""Creates a TensorFlow computation for the binary `operator`.
For `T` the `operand_type`, the type signature of the constructed operator
will be `(<T,T> -> U)`, where `U` is the result of applying `operator` to
a tuple of type `<T,T>`.
Notice that we have quite serious restrictions on `operand_type` here; not
only must it be compatible with stamping into a TensorFlow graph, but
additionally cannot contain a `computation_types.SequenceType`, as checked by
`type_analysis.is_generic_op_compatible_type`.
Notice also that if `operand_type` is a `computation_types.StructType` and
`second_operand_type` is not `None`, `operator` will be applied pointwise.
This places the burden on callers of this function to construct the correct
values to pass into the returned function. For example, to divide `[2, 2]` by
`2`, first the `int 2` must be packed into the data structure `[x, x]`, before
the division operator of the appropriate type is called.
Args:
operator: Callable taking two arguments specifying the operation to encode.
For example, `tf.add`, `tf.multiply`, `tf.divide`, ...
operand_type: The type of argument to the constructed binary operator. Must
be convertible to `computation_types.Type`.
second_operand_type: An optional type for the second argument to the
constructed binary operator. Must be convertible to
`computation_types.Type`. If `None`, uses `operand_type` for the second
argument's type.
Returns:
Instance of `building_blocks.CompiledComputation` encoding
this binary operator.
Raises:
TypeError: If the type tree of `operand_type` contains any type which is
incompatible with the TFF generic operators, as checked by
`type_analysis.is_generic_op_compatible_type`, or `operator` is not
callable.
"""
proto, type_signature = tensorflow_computation_factory.create_binary_operator(
operator, operand_type, second_operand_type)
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
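# Illustrative sketch: a compiled addition operator of type
# (<float32,float32> -> float32), assuming the usual TensorType constructor:
#   add_op = create_tensorflow_binary_operator(
#       tf.add, computation_types.TensorType(tf.float32))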
def create_federated_getitem_call(
arg: building_blocks.ComputationBuildingBlock,
idx: Union[int, slice]) -> building_blocks.Call:
"""Creates computation building block passing getitem to federated value.
Args:
arg: Instance of `building_blocks.ComputationBuildingBlock` of
`computation_types.FederatedType` with member of type
`computation_types.StructType` from which we wish to pick out item `idx`.
idx: Index, instance of `int` or `slice` used to address the
`computation_types.StructType` underlying `arg`.
Returns:
Returns a `building_blocks.Call` with type signature
`computation_types.FederatedType` of same placement as `arg`, the result
of applying or mapping the appropriate `__getitem__` function, as defined
by `idx`.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(idx, (int, slice))
py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
py_typecheck.check_type(arg.type_signature.member,
computation_types.StructType)
getitem_comp = create_federated_getitem_comp(arg, idx)
return create_federated_map_or_apply(getitem_comp, arg)
def create_federated_getattr_call(arg: building_blocks.ComputationBuildingBlock,
name: str) -> building_blocks.Call:
"""Creates computation building block passing getattr to federated value.
Args:
arg: Instance of `building_blocks.ComputationBuildingBlock` of
`computation_types.FederatedType` with member of type
`computation_types.StructType` from which we wish to pick out item `name`.
name: String name to address the `computation_types.StructType` underlying
`arg`.
Returns:
Returns a `building_blocks.Call` with type signature
`computation_types.FederatedType` of same placement as `arg`,
the result of applying or mapping the appropriate `__getattr__` function,
as defined by `name`.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(name, str)
py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
py_typecheck.check_type(arg.type_signature.member,
computation_types.StructType)
getattr_comp = create_federated_getattr_comp(arg, name)
return create_federated_map_or_apply(getattr_comp, arg)
def create_federated_getattr_comp(
comp: building_blocks.ComputationBuildingBlock,
name: str) -> building_blocks.Lambda:
"""Function to construct computation for `federated_apply` of `__getattr__`.
Creates a `building_blocks.ComputationBuildingBlock`
which selects `name` from its argument, of type `comp.type_signature.member`,
an instance of `computation_types.StructType`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` with type
signature `computation_types.FederatedType` whose `member` attribute is of
type `computation_types.StructType`.
name: String name of attribute to grab.
Returns:
Instance of `building_blocks.Lambda` which grabs attribute
according to `name` of its argument.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
py_typecheck.check_type(comp.type_signature.member,
computation_types.StructType)
py_typecheck.check_type(name, str)
element_names = [
x for x, _ in structure.iter_elements(comp.type_signature.member)
]
if name not in element_names:
raise ValueError(
'The federated value has no element of name `{}`. Value: {}'.format(
name, comp.formatted_representation()))
apply_input = building_blocks.Reference('x', comp.type_signature.member)
selected = building_blocks.Selection(apply_input, name=name)
apply_lambda = building_blocks.Lambda('x', apply_input.type_signature,
selected)
return apply_lambda
def create_federated_getitem_comp(
comp: building_blocks.ComputationBuildingBlock,
key: Union[int, slice]) -> building_blocks.Lambda:
"""Function to construct computation for `federated_apply` of `__getitem__`.
Creates a `building_blocks.ComputationBuildingBlock`
which selects `key` from its argument, of type `comp.type_signature.member`,
of type `computation_types.StructType`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` with type
signature `computation_types.FederatedType` whose `member` attribute is of
type `computation_types.StructType`.
key: Instance of `int` or `slice`, key used to grab elements from the member
      of `comp`; this supports the implementation of slicing for `ValueImpl`
      objects whose `type_signature` is a `computation_types.StructType`.
Returns:
Instance of `building_blocks.Lambda` which grabs slice
according to `key` of its argument.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
py_typecheck.check_type(comp.type_signature.member,
computation_types.StructType)
py_typecheck.check_type(key, (int, slice))
apply_input = building_blocks.Reference('x', comp.type_signature.member)
if isinstance(key, int):
selected = building_blocks.Selection(apply_input, index=key)
else:
elems = structure.to_elements(comp.type_signature.member)
index_range = range(*key.indices(len(elems)))
elem_list = []
for k in index_range:
elem_list.append(
(elems[k][0], building_blocks.Selection(apply_input, index=k)))
selected = building_blocks.Struct(elem_list)
apply_lambda = building_blocks.Lambda('x', apply_input.type_signature,
selected)
return apply_lambda
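# Illustrative reading: for a federated member type `<a=int32, b=float32>`,
# create_federated_getitem_comp(comp, 0) builds the lambda `(x -> x[0])`, while a
# slice such as slice(0, 2) builds `(x -> <a=x[0], b=x[1]>)`, preserving names.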
def create_computation_appending(
comp1: building_blocks.ComputationBuildingBlock,
comp2: building_blocks.ComputationBuildingBlock):
r"""Returns a block appending `comp2` to `comp1`.
Block
/ \
[comps=Tuple] Tuple
| |
[Comp, Comp] [Sel(0), ..., Sel(0), Sel(1)]
\ \ \
Sel(0) Sel(n) Ref(comps)
\ \
Ref(comps) Ref(comps)
Args:
comp1: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_type.StructType`.
comp2: A `building_blocks.ComputationBuildingBlock` or a named computation
(a tuple pair of name, computation) representing a single element of an
`structure.Struct`.
Returns:
A `building_blocks.Block`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(comp1, building_blocks.ComputationBuildingBlock)
if isinstance(comp2, building_blocks.ComputationBuildingBlock):
name2 = None
elif py_typecheck.is_name_value_pair(
comp2,
name_required=False,
value_type=building_blocks.ComputationBuildingBlock):
name2, comp2 = comp2
else:
raise TypeError('Unexpected tuple element: {}.'.format(comp2))
comps = building_blocks.Struct((comp1, comp2))
ref = building_blocks.Reference('comps', comps.type_signature)
sel_0 = building_blocks.Selection(ref, index=0)
elements = []
named_type_signatures = structure.to_elements(comp1.type_signature)
for index, (name, _) in enumerate(named_type_signatures):
sel = building_blocks.Selection(sel_0, index=index)
elements.append((name, sel))
sel_1 = building_blocks.Selection(ref, index=1)
elements.append((name2, sel_1))
result = building_blocks.Struct(elements)
symbols = ((ref.name, comps),)
return building_blocks.Block(symbols, result)
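# Illustrative sketch: appending a named element to a struct-typed computation
# (`struct_comp` and `extra_comp` are hypothetical building blocks):
#   appended = create_computation_appending(struct_comp, ('c', extra_comp))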
def _unname_fn_parameter(fn, unnamed_parameter_type):
"""Coerces `fn` to a comp whose parameter type is `unnamed_parameter_type`."""
if structure.name_list(fn.type_signature.parameter):
return building_blocks.Lambda(
'a', unnamed_parameter_type,
building_blocks.Call(
fn,
building_blocks.Reference('a', unnamed_parameter_type),
))
else:
return fn
def create_null_federated_aggregate() -> building_blocks.Call:
unit = building_blocks.Struct([])
unit_type = unit.type_signature
value = create_federated_value(unit, placements.CLIENTS)
zero = unit
accumulate = create_tensorflow_binary_operator(lambda a, b: a, unit_type)
merge = accumulate
report = create_compiled_identity(computation_types.StructType([]))
return create_federated_aggregate(value, zero, accumulate, merge, report)
def create_federated_aggregate(
value: building_blocks.ComputationBuildingBlock,
zero: building_blocks.ComputationBuildingBlock,
accumulate: building_blocks.ComputationBuildingBlock,
merge: building_blocks.ComputationBuildingBlock,
report: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated aggregate.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp, Comp, Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
value.
accumulate: A `building_blocks.ComputationBuildingBlock` to use as the
accumulate function.
merge: A `building_blocks.ComputationBuildingBlock` to use as the merge
function.
report: A `building_blocks.ComputationBuildingBlock` to use as the report
function.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(accumulate, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(merge, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(report, building_blocks.ComputationBuildingBlock)
  # It's okay if the first argument of accumulate is assignable from the zero,
# without being the exact type. This occurs when accumulate has a type like
# (<int32[?], int32> -> int32[?]) but zero is int32[0].
zero_arg_type = accumulate.type_signature.parameter[0]
zero_arg_type.check_assignable_from(zero.type_signature)
result_type = computation_types.FederatedType(report.type_signature.result,
placements.SERVER)
accumulate_parameter_type = computation_types.StructType(
[zero_arg_type, value.type_signature.member])
accumulate = _unname_fn_parameter(accumulate, accumulate_parameter_type)
merge_parameter_type = computation_types.StructType(
[zero_arg_type, zero_arg_type])
merge = _unname_fn_parameter(merge, merge_parameter_type)
intrinsic_type = computation_types.FunctionType((
type_conversions.type_to_non_all_equal(value.type_signature),
zero_arg_type,
accumulate.type_signature,
merge.type_signature,
report.type_signature,
), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_AGGREGATE.uri,
intrinsic_type)
values = building_blocks.Struct((value, zero, accumulate, merge, report))
return building_blocks.Call(intrinsic, values)
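# Illustrative reading of the wiring above: `accumulate` folds client values into
# partial aggregates of the zero's type, `merge` combines two partial aggregates,
# and `report` post-processes the merged aggregate into the server-placed result:
#   agg = create_federated_aggregate(value, zero, accumulate, merge, report)
#   # agg.type_signature is the SERVER-placed type of report's result.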
def create_federated_apply(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated apply.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(fn.type_signature.result,
placements.SERVER)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, arg.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_APPLY.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_null_federated_broadcast():
return create_federated_broadcast(
create_federated_value(building_blocks.Struct([]), placements.SERVER))
def create_federated_broadcast(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated broadcast.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(
value.type_signature.member, placements.CLIENTS, all_equal=True)
intrinsic_type = computation_types.FunctionType(value.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_BROADCAST.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def create_federated_eval(
fn: building_blocks.ComputationBuildingBlock,
placement: placements.PlacementLiteral,
) -> building_blocks.Call:
r"""Creates a called federated eval.
Call
/ \
Intrinsic Comp
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
placement: A `placements.PlacementLiteral` to use as the placement.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(fn.type_signature, computation_types.FunctionType)
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_EVAL_AT_CLIENTS.uri
all_equal = False
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_EVAL_AT_SERVER.uri
all_equal = True
else:
raise TypeError('Unsupported placement {}.'.format(placement))
result_type = computation_types.FederatedType(
fn.type_signature.result, placement, all_equal=all_equal)
intrinsic_type = computation_types.FunctionType(fn.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, fn)
def create_null_federated_map() -> building_blocks.Call:
return create_federated_map(
create_compiled_identity(computation_types.StructType([])),
create_federated_value(building_blocks.Struct([]), placements.CLIENTS))
def create_federated_map(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
parameter_type = computation_types.FederatedType(arg.type_signature.member,
placements.CLIENTS)
result_type = computation_types.FederatedType(fn.type_signature.result,
placements.CLIENTS)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, parameter_type), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_MAP.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_federated_map_all_equal(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map of equal values.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Note: The `fn` is required to be deterministic and therefore should contain no
`building_blocks.CompiledComputations`.
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
parameter_type = computation_types.FederatedType(
arg.type_signature.member, placements.CLIENTS, all_equal=True)
result_type = computation_types.FederatedType(
fn.type_signature.result, placements.CLIENTS, all_equal=True)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, parameter_type), result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_MAP_ALL_EQUAL.uri, intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_federated_map_or_apply(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map or apply depending on `arg`s placement.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
if arg.type_signature.placement is placements.CLIENTS:
if arg.type_signature.all_equal:
return create_federated_map_all_equal(fn, arg)
else:
return create_federated_map(fn, arg)
elif arg.type_signature.placement is placements.SERVER:
return create_federated_apply(fn, arg)
else:<|fim▁hole|> arg.type_signature.placement))
def create_federated_mean(
value: building_blocks.ComputationBuildingBlock,
weight: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated mean.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
weight: A `building_blocks.ComputationBuildingBlock` to use as the weight or
`None`.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
if weight is not None:
py_typecheck.check_type(weight, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
if weight is not None:
intrinsic_type = computation_types.FunctionType(
(type_conversions.type_to_non_all_equal(value.type_signature),
type_conversions.type_to_non_all_equal(weight.type_signature)),
result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_WEIGHTED_MEAN.uri, intrinsic_type)
values = building_blocks.Struct((value, weight))
return building_blocks.Call(intrinsic, values)
else:
intrinsic_type = computation_types.FunctionType(
type_conversions.type_to_non_all_equal(value.type_signature),
result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_MEAN.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
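# Illustrative sketch: create_federated_mean(value_comp, None) builds a call to
# the federated_mean intrinsic, while passing a client-placed `weight_comp`
# instead builds a call to federated_weighted_mean (both arguments hypothetical).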
def create_null_federated_secure_modular_sum():
return create_federated_secure_modular_sum(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_modular_sum(
value: building_blocks.ComputationBuildingBlock,
modulus: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called secure modular sum.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
modulus: A `building_blocks.ComputationBuildingBlock` to use as the
`modulus` value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(modulus, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
modulus.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_SECURE_MODULAR_SUM.uri, intrinsic_type)
values = building_blocks.Struct([value, modulus])
return building_blocks.Call(intrinsic, values)
def create_null_federated_secure_sum():
return create_federated_secure_sum(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_sum(
value: building_blocks.ComputationBuildingBlock,
max_input: building_blocks.ComputationBuildingBlock
) -> building_blocks.Call:
r"""Creates a called secure sum.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
max_input: A `building_blocks.ComputationBuildingBlock` to use as the
`max_input` value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(max_input, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
max_input.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_SECURE_SUM.uri,
intrinsic_type)
values = building_blocks.Struct([value, max_input])
return building_blocks.Call(intrinsic, values)
def create_null_federated_secure_sum_bitwidth():
return create_federated_secure_sum_bitwidth(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_sum_bitwidth(
value: building_blocks.ComputationBuildingBlock,
bitwidth: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called secure sum using bitwidth.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
bitwidth: A `building_blocks.ComputationBuildingBlock` to use as the
bitwidth value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(bitwidth, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
bitwidth.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_SECURE_SUM_BITWIDTH.uri, intrinsic_type)
values = building_blocks.Struct([value, bitwidth])
return building_blocks.Call(intrinsic, values)
def create_federated_select(
client_keys,
max_key,
server_val,
select_fn,
secure: bool,
) -> building_blocks.Call:
"""Creates a called `federated_select` or `federated_secure_select`."""
py_typecheck.check_type(client_keys, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(max_key, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(server_val, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(select_fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(secure, bool)
single_key_type = max_key.type_signature.member
select_fn_unnamed_param_type = computation_types.StructType([
(None, server_val.type_signature.member),
(None, single_key_type),
])
select_fn = _unname_fn_parameter(select_fn, select_fn_unnamed_param_type)
result_type = computation_types.at_clients(
computation_types.SequenceType(select_fn.type_signature.result))
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(
client_keys.type_signature), max_key.type_signature,
server_val.type_signature, select_fn.type_signature
], result_type)
intrinsic_def = intrinsic_defs.FEDERATED_SECURE_SELECT if secure else intrinsic_defs.FEDERATED_SELECT
intrinsic = building_blocks.Intrinsic(intrinsic_def.uri, intrinsic_type)
values = building_blocks.Struct([client_keys, max_key, server_val, select_fn])
return building_blocks.Call(intrinsic, values)
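# Illustrative reading: the generated intrinsic takes client-placed keys, a
# server-placed max key and database value, and a per-key `select_fn`, and yields
# a client-placed sequence of selected results; `secure=True` selects the
# federated_secure_select intrinsic rather than federated_select.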
def create_federated_sum(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated sum.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType(
type_conversions.type_to_non_all_equal(value.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_SUM.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def create_federated_unzip(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Block:
r"""Creates a tuple of called federated maps or applies.
Block
/ \
[value=Comp] Tuple
|
[Call, Call, ...]
/ \ / \
Intrinsic Tuple Intrinsic Tuple
| |
[Lambda(arg), Ref(value)] [Lambda(arg), Ref(value)]
\ \
Sel(0) Sel(1)
\ \
Ref(arg) Ref(arg)
This function returns a tuple of federated values given a `value` with a
federated tuple type signature.
Args:
value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType` containing at least one element.
Returns:
A `building_blocks.Block`.
Raises:
TypeError: If any of the types do not match.
ValueError: If `value` does not contain any elements.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
named_type_signatures = structure.to_elements(value.type_signature.member)
length = len(named_type_signatures)
if length == 0:
    raise ValueError('federated_unzip is only supported on non-empty tuples.')
value_ref = building_blocks.Reference('value', value.type_signature)
elements = []
fn_ref = building_blocks.Reference('arg', named_type_signatures)
for index, (name, _) in enumerate(named_type_signatures):
sel = building_blocks.Selection(fn_ref, index=index)
fn = building_blocks.Lambda(fn_ref.name, fn_ref.type_signature, sel)
intrinsic = create_federated_map_or_apply(fn, value_ref)
elements.append((name, intrinsic))
result = building_blocks.Struct(elements,
value.type_signature.member.python_container)
symbols = ((value_ref.name, value),)
return building_blocks.Block(symbols, result)
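# Illustrative reading: for `value` of federated type `<a=int32, b=float32>@SERVER`,
# the returned block has type `<a=int32@SERVER, b=float32@SERVER>`, i.e. one
# federated value per element of the original member struct.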
def create_federated_value(
value: building_blocks.ComputationBuildingBlock,
placement: placements.PlacementLiteral) -> building_blocks.Call:
r"""Creates a called federated value.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
placement: A `placements.PlacementLiteral` to use as the placement.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri
else:
raise TypeError('Unsupported placement {}.'.format(placement))
result_type = computation_types.FederatedType(
value.type_signature, placement, all_equal=True)
intrinsic_type = computation_types.FunctionType(value.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, value)
def _check_placements(
placement_values: AbstractSet[placements.PlacementLiteral]):
"""Checks if the placements of the values being zipped are compatible."""
if not placement_values:
raise TypeError('federated_zip is only supported on nested structures '
'containing at least one FederatedType, but none were '
'found.')
elif len(placement_values) > 1:
placement_list = ', '.join(placement.name for placement in placement_values)
raise TypeError('federated_zip requires all nested FederatedTypes to '
'have the same placement, but values placed at '
f'{placement_list} were found.')
def create_federated_zip(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated zip.
This function accepts a value whose type signature is a (potentially) nested
tuple structure of federated values all with the same placement, and uses
one of the federated_zip intrinsics (at client or at server) to promote the
placement to the highest level. E.g., A value of type '<A@S, <<B@S>, C@S>>'
would be mapped to a value of type '<A, <<B>, C>>@S'.
Args:
value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType` that may contain other nested
`computation_types.StructTypes` bottoming out in at least one element of
type `computation_Types.FederatedType`. These federated types must be at
the same placement.
Returns:
A `building_blocks.Call` whose type signature is now a federated
`computation_types.StructType`, placed at the same placement as the
leaves of `value`.
Raises:
TypeError: If any of the types do not match.
ValueError: If `value` does not contain any elements.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(value.type_signature, computation_types.StructType)
all_placements = set()
def _record_placements(type_signature: computation_types.Type):
"""Records the placements in `type_signature` to `all_placements`."""
if type_signature.is_federated():
all_placements.add(type_signature.placement)
elif type_signature.is_struct():
for i in range(len(type_signature)):
_record_placements(type_signature[i])
else:
raise TypeError(
'Expected type signatures consisting of structures of StructType '
'bottoming out in FederatedType, found: \n{}'.format(type_signature))
_record_placements(value.type_signature)
_check_placements(all_placements)
placement = all_placements.pop()
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS.uri
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_ZIP_AT_SERVER.uri
else:
raise TypeError('Unsupported placement {}.'.format(placement))
def normalize_all_equals(element_type):
if (element_type.is_federated() and element_type.placement.is_clients() and
element_type.all_equal):
return computation_types.at_clients(element_type.member), True
return element_type, False
normalized_input_type, _ = type_transformations.transform_type_postorder(
value.type_signature, normalize_all_equals)
unplaced_output_type = type_transformations.strip_placement(
value.type_signature)
output_type = computation_types.FederatedType(unplaced_output_type, placement)
intrinsic_type = computation_types.FunctionType(normalized_input_type,
output_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, value)
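# Illustrative sketch (mirrors the docstring): zipping a hypothetical value of
# type `<A@S, <<B@S>, C@S>>` produces a single server-placed value of type
# `<A, <<B>, C>>@S`:
#   zipped = create_federated_zip(struct_of_federated_values)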
@functools.lru_cache()
def create_generic_constant(
type_spec: Optional[computation_types.Type],
scalar_value: Union[int,
float]) -> building_blocks.ComputationBuildingBlock:
"""Creates constant for a combination of federated, tuple and tensor types.
Args:
type_spec: A `computation_types.Type` containing only federated, tuple or
      tensor types, or `None` to construct a generic constant.
scalar_value: The scalar value we wish this constant to have.
Returns:
Instance of `building_blocks.ComputationBuildingBlock`
representing `scalar_value` packed into `type_spec`.
Raises:
TypeError: If types don't match their specification in the args section.
    Notice that validating the consistency of `type_spec` with `scalar_value` is
    not the responsibility of this function.
"""
if type_spec is None:
return create_tensorflow_constant(type_spec, scalar_value)
py_typecheck.check_type(type_spec, computation_types.Type)
inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
if (not inferred_scalar_value_type.is_tensor() or
inferred_scalar_value_type.shape != tf.TensorShape(())):
raise TypeError(
'Must pass a scalar value to `create_generic_constant`; encountered a '
'value {}'.format(scalar_value))
if not type_analysis.contains_only(
type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
raise TypeError
if type_analysis.contains_only(type_spec,
lambda t: t.is_struct() or t.is_tensor()):
return create_tensorflow_constant(type_spec, scalar_value)
elif type_spec.is_federated():
unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value)
if type_spec.placement == placements.CLIENTS:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
elif type_spec.placement == placements.SERVER:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
return building_blocks.Call(placement_function, unplaced_zero)
elif type_spec.is_struct():
elements = []
for k in range(len(type_spec)):
elements.append(create_generic_constant(type_spec[k], scalar_value))
names = [name for name, _ in structure.iter_elements(type_spec)]
packed_elements = building_blocks.Struct(elements)
named_tuple = create_named_tuple(packed_elements, names,
type_spec.python_container)
return named_tuple
else:
raise ValueError(
'The type_spec {} has slipped through all our '
'generic constant cases, and failed to raise.'.format(type_spec))
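# Illustrative sketch: packing the scalar 0 into every tensor leaf of a
# hypothetical federated struct type, preserving its placement:
#   zero_like = create_generic_constant(some_federated_struct_type, 0)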
def create_sequence_map(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence map.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
result_type = computation_types.SequenceType(fn.type_signature.result)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, arg.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_MAP.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_sequence_reduce(
value: building_blocks.ComputationBuildingBlock,
zero: building_blocks.ComputationBuildingBlock,
op: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence reduce.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
value.
op: A `building_blocks.ComputationBuildingBlock` to use as the op function.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(op, building_blocks.ComputationBuildingBlock)
op_parameter_type = computation_types.StructType(
[zero.type_signature, value.type_signature.element])
op = _unname_fn_parameter(op, op_parameter_type)
intrinsic_type = computation_types.FunctionType((
value.type_signature,
zero.type_signature,
op.type_signature,
), op.type_signature.result)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_REDUCE.uri,
intrinsic_type)
values = building_blocks.Struct((value, zero, op))
return building_blocks.Call(intrinsic, values)
def create_sequence_sum(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence sum.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
intrinsic_type = computation_types.FunctionType(value.type_signature,
value.type_signature.element)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_SUM.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def _create_naming_function(tuple_type_to_name, names_to_add, container_type):
"""Private function to construct lambda naming a given tuple type.
Args:
tuple_type_to_name: Instance of `computation_types.StructType`, the type of
the argument which we wish to name.
names_to_add: Python `list` or `tuple`, the names we wish to give to
`tuple_type_to_name`.
container_type: Optional Python container type to associate with the
resulting tuple.
Returns:
An instance of `building_blocks.Lambda` representing a function
which will take an argument of type `tuple_type_to_name` and return a tuple
with the same elements, but with names in `names_to_add` attached.
Raises:
ValueError: If `tuple_type_to_name` and `names_to_add` have different
lengths.
"""
py_typecheck.check_type(tuple_type_to_name, computation_types.StructType)
if len(names_to_add) != len(tuple_type_to_name):
raise ValueError(
        'Number of elements in `names_to_add` must match number of elements in '
'the named tuple type `tuple_type_to_name`; here, `names_to_add` has '
'{} elements and `tuple_type_to_name` has {}.'.format(
len(names_to_add), len(tuple_type_to_name)))
naming_lambda_arg = building_blocks.Reference('x', tuple_type_to_name)
def _create_struct_element(i):
return (names_to_add[i],
building_blocks.Selection(naming_lambda_arg, index=i))
named_result = building_blocks.Struct(
[_create_struct_element(k) for k in range(len(names_to_add))],
container_type)
return building_blocks.Lambda('x', naming_lambda_arg.type_signature,
named_result)
def create_named_tuple(
comp: building_blocks.ComputationBuildingBlock,
names: Sequence[str],
container_type=None,
) -> building_blocks.ComputationBuildingBlock:
"""Creates a computation that applies `names` to `comp`.
Args:
comp: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType`.
names: Python `tuple` or `list` containing instances of type `str` or
`None`, the names to apply to `comp`.
    container_type: Optional Python container type to associate with the
resulting tuple.
Returns:
A `building_blocks.ComputationBuildingBlock` representing a
tuple with the elements from `comp` and the names from `names` attached to
the `type_signature` of those elements.
Raises:
TypeError: If the types do not match.
"""
py_typecheck.check_type(names, (list, tuple))
if not all(isinstance(x, (str, type(None))) for x in names):
raise TypeError('Expected `names` containing only instances of `str` or '
'`None`, found {}'.format(names))
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.StructType)
fn = _create_naming_function(comp.type_signature, names, container_type)
return building_blocks.Call(fn, comp)
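# Illustrative sketch: attaching the names 'a' and 'b' to a hypothetical
# unnamed two-tuple computation:
#   named = create_named_tuple(two_tuple_comp, ['a', 'b'])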
def create_zip(
comp: building_blocks.ComputationBuildingBlock) -> building_blocks.Block:
r"""Returns a computation which zips `comp`.
Returns the following computation where `x` is `comp` unless `comp` is a
Reference, in which case the Reference is inlined and the Tuple is returned.
Block
/ \
[comp=x] Tuple
|
[Tuple, Tuple]
| |
[Sel(0), Sel(0)] [Sel(1), Sel(1)]
| | | |
Sel(0) Sel(1) Sel(0) Sel(1)
| | | |
Ref(comp) Ref(comp) Ref(comp) Ref(comp)
The returned computation intentionally drops names from the tuples, otherwise
it would be possible for the resulting type signature to contain a Tuple where
two elements have the same name and this is not allowed. It is left up to the
  caller to decide if and where to add the names back.
Args:
comp: The computation building block in which to perform the merges.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.StructType)
named_type_signatures = structure.to_elements(comp.type_signature)
_, first_type_signature = named_type_signatures[0]
py_typecheck.check_type(first_type_signature, computation_types.StructType)
length = len(first_type_signature)
for _, type_signature in named_type_signatures:
py_typecheck.check_type(type_signature, computation_types.StructType)
if len(type_signature) != length:
raise TypeError(
'Expected a StructType containing StructTypes with the same '
'length, found: {}'.format(comp.type_signature))
if not comp.is_reference():
name_generator = unique_name_generator(comp)
name = next(name_generator)
ref = building_blocks.Reference(name, comp.type_signature)
else:
ref = comp
rows = []
for column in range(len(first_type_signature)):
columns = []
for row in range(len(named_type_signatures)):
sel_row = building_blocks.Selection(ref, index=row)
sel_column = building_blocks.Selection(sel_row, index=column)
columns.append(sel_column)
tup = building_blocks.Struct(columns)
rows.append(tup)
tup = building_blocks.Struct(rows)
if not comp.is_reference():
return building_blocks.Block(((ref.name, comp),), tup)
else:
return tup
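# Rough sketch of the effect of `create_zip` (assumed example, not taken from
# the original module): for a `comp` of type `<<a,b>,<c,d>>` the returned
# computation has type `<<a,c>,<b,d>>`; element `i` of the output collects
# element `i` of every row, with names dropped as described in the docstring.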
def _check_generic_operator_type(type_spec):
"""Checks that `type_spec` can be the signature of args to a generic op."""
if not type_analysis.contains_only(
type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
raise TypeError(
'Generic operators are only implemented for arguments both containing '
'only federated, tuple and tensor types; you have passed an argument '
'of type {} '.format(type_spec))
if not (type_spec.is_struct() and len(type_spec) == 2):
raise TypeError(
        'We are trying to construct a generic operator declaring an argument that '
'is not a two-tuple, the type {}.'.format(type_spec))
if not type_analysis.is_binary_op_with_upcast_compatible_pair(
type_spec[0], type_spec[1]):
raise TypeError(
'The two-tuple you have passed in is incompatible with upcasted '
'binary operators. You have passed the tuple type {}, which fails the '
'check that the two members of the tuple are either the same type, or '
'the second is a scalar with the same dtype as the leaves of the '
'first. See `type_analysis.is_binary_op_with_upcast_compatible_pair` for '
'more details.'.format(type_spec))
@functools.lru_cache()
def create_tensorflow_binary_operator_with_upcast(
operator: Callable[[Any, Any], Any], type_signature: computation_types.Type
) -> building_blocks.CompiledComputation:
"""Creates TF computation upcasting its argument and applying `operator`.
The concept of upcasting is explained further in the docstring for
`apply_binary_operator_with_upcast`.
Args:
operator: Callable defining the operator.
type_signature: Value convertible to `computation_types.StructType`, with
two elements, both of the same type or the second able to be upcast to the
first, as explained in `apply_binary_operator_with_upcast`, and both
containing only tuples and tensors in their type tree.
Returns:
A `building_blocks.CompiledComputation` encapsulating a function which
upcasts the second element of its argument and applies the binary
operator.
"""
py_typecheck.check_callable(operator)
_check_generic_operator_type(type_signature)
type_analysis.check_tensorflow_compatible_type(type_signature)
tf_proto, type_signature = tensorflow_computation_factory.create_binary_operator_with_upcast(
type_signature, operator)
compiled = building_blocks.CompiledComputation(
tf_proto, type_signature=type_signature)
return compiled
def apply_binary_operator_with_upcast(
arg: building_blocks.ComputationBuildingBlock,
operator: Callable[[Any, Any], Any]) -> building_blocks.Call:
"""Constructs result of applying `operator` to `arg` upcasting if appropriate.
Notice `arg` here must be of federated type, with a named tuple member of
length 2, or a named tuple type of length 2. If the named tuple type of `arg`
satisfies certain conditions (that is, there is only a single tensor dtype in
the first element of `arg`, and the second element represents a scalar of
this dtype), the second element will be upcast to match the first. Here this
means it will be pushed into a nested structure matching the structure of the
first element of `arg`. For example, it makes perfect sense to divide a model
of type `<a=float32[784],b=float32[10]>` by a scalar of type `float32`, but
the binary operator constructors we have implemented only take arguments of
type `<T, T>`. Therefore in this case we would broadcast the `float` argument
  to the `tuple` type, before constructing a binary operator which divides
pointwise.
Args:
arg: `building_blocks.ComputationBuildingBlock` of federated type whose
`member` attribute is a named tuple type of length 2, or named tuple type
of length 2.
operator: Callable representing binary operator to apply to the 2-tuple
represented by the federated `arg`.
Returns:
Instance of `building_blocks.Call`
encapsulating the result of formally applying `operator` to
    `arg[0]`, `arg[1]`, upcasting `arg[1]` in the condition described above.
Raises:
TypeError: If the types don't match.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_callable(operator)
if arg.type_signature.is_federated():
tuple_type = arg.type_signature.member
assert tuple_type.is_struct()
elif arg.type_signature.is_struct():
tuple_type = arg.type_signature
else:
raise TypeError(
'Generic binary operators are only implemented for federated tuple and '
'unplaced tuples; you have passed {}.'.format(arg.type_signature))
tf_representing_op = create_tensorflow_binary_operator_with_upcast(
operator, tuple_type)
if arg.type_signature.is_federated():
called = create_federated_map_or_apply(tf_representing_op, arg)
else:
called = building_blocks.Call(tf_representing_op, arg)
return called
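# Hedged usage sketch (hypothetical names, not part of the original module):
# with `arg` a federated two-tuple whose member type is
# `<<a=float32[784],b=float32[10]>, float32>`, one could write
#
#   import tensorflow as tf
#   scaled = apply_binary_operator_with_upcast(arg, tf.divide)
#
# and the scalar second element would be upcast to the structure of the first
# before the pointwise division is applied under a federated map/apply.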
def zip_to_match_type(
*, comp_to_zip: building_blocks.ComputationBuildingBlock,
target_type: computation_types.Type
) -> Optional[building_blocks.ComputationBuildingBlock]:
"""Zips computation argument to match target type.
This function will apply the appropriate federated zips to match `comp_to_zip`
to the requested type `target_type`, subject to a few caveats. We will
traverse `computation_types.StructTypes` to match types, so for example we
would zip `<<T@P, R@P>>` to match `<<T, R>@P>`, but we will not traverse
`computation_types.FunctionTypes`. Therefore we would not apply a zip to the
  parameter of `(<<T@P, R@P>> -> Q)` to match `(<<T, R>@P> -> Q)`.
If zipping in this manner cannot match the type of `comp_to_zip` to
`target_type`, `None` will be returned.
Args:
comp_to_zip: Instance of `building_blocks.ComputationBuildingBlock` to
traverse and attempt to zip to match `target_type`.
target_type: The type to target when traversing and zipping `comp_to_zip`.
Returns:
Either a potentially transformed version of `comp_to_zip` or `None`,
depending on whether inserting a zip according to the semantics above
  can transform `comp_to_zip` to the requested type.
"""
py_typecheck.check_type(comp_to_zip, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(target_type, computation_types.Type)
def _can_be_zipped_into(source_type: computation_types.Type,
target_type: computation_types.Type) -> bool:
"""Indicates possibility of the transformation `zip_to_match_type`."""
def _struct_can_be_zipped_to_federated(
struct_type: computation_types.StructType,
federated_type: computation_types.FederatedType) -> bool:
placements_encountered = set()
def _remove_placement(
subtype: computation_types.Type
) -> Tuple[computation_types.Type, bool]:
if subtype.is_federated():
placements_encountered.add(subtype.placement)
return subtype.member, True
return subtype, False
unplaced_struct, _ = type_transformations.transform_type_postorder(
struct_type, _remove_placement)
if not (all(
x is federated_type.placement for x in placements_encountered)):
return False
if (federated_type.placement is placements.CLIENTS and
federated_type.all_equal):
# There is no all-equal clients zip; return false.
return False
return federated_type.member.is_assignable_from(unplaced_struct)
def _struct_elem_zippable(source_name, source_element, target_name,
target_element):
return _can_be_zipped_into(
source_element, target_element) and source_name in (target_name, None)
if source_type.is_struct():
if target_type.is_federated():
return _struct_can_be_zipped_to_federated(source_type, target_type)
elif target_type.is_struct():
elements_zippable = []
for (s_name, s_el), (t_name, t_el) in zip(
structure.iter_elements(source_type),
structure.iter_elements(target_type)):
elements_zippable.append(
_struct_elem_zippable(s_name, s_el, t_name, t_el))
return all(elements_zippable)
else:
return target_type.is_assignable_from(source_type)
def _zip_to_match(
*, source: building_blocks.ComputationBuildingBlock,
target_type: computation_types.Type
) -> building_blocks.ComputationBuildingBlock:
if target_type.is_federated() and source.type_signature.is_struct():
return create_federated_zip(source)
elif target_type.is_struct() and source.type_signature.is_struct():
zipped_elements = []
# Bind a reference to the source to prevent duplication in the AST.
ref_name = next(unique_name_generator(source))
ref_to_source = building_blocks.Reference(ref_name, source.type_signature)
for idx, ((_, t_el), (s_name, _)) in enumerate(
zip(
structure.iter_elements(target_type),
structure.iter_elements(source.type_signature))):
s_selection = building_blocks.Selection(ref_to_source, index=idx)
zipped_elements.append(
(s_name, _zip_to_match(source=s_selection, target_type=t_el)))
# Insert binding above the constructed structure.
return building_blocks.Block([(ref_name, source)],
building_blocks.Struct(zipped_elements))
else:
# No zipping to be done here.
return source
if target_type.is_assignable_from(comp_to_zip.type_signature):
# No zipping needs to be done; return directly.
return comp_to_zip
elif _can_be_zipped_into(comp_to_zip.type_signature, target_type):
return _zip_to_match(source=comp_to_zip, target_type=target_type)
else:
# Zipping cannot be performed here.
return None<|fim▁end|> | raise TypeError('Unsupported placement {}.'.format( |
<|file_name|>Operations.py<|end_file_name|><|fim▁begin|># This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <[email protected]>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import Amarok
from Amarok import Commands
from Core.MyObjects import *
import Taggers
import FileUtils as fu
from Core import Universals as uni
from Core import Dialogs
from Core import Records
from Core import ReportBug
def getDirectoriesAndValues(_filter=""):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getDirectoriesAndValues(_filter)
return None
def getAllMusicFileValuesWithNames(_filter="", _artistId=None):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getAllMusicFileValuesWithNames(_filter, _artistId)
return None
def getAllArtistsValues(_filter=""):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getAllArtistsValues(_filter)
return None
def changePaths(_values, _type="auto"):
uni.startThreadAction()
allItemNumber = len(_values)
for valueNo, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
if _type == "file" or (_type == "auto" and fu.isFile(value["newPath"])):
Commands.changeFilePath(value["oldPath"], value["newPath"])
else:
Commands.changeDirectoryPath(value["oldPath"], value["newPath"])
except:
ReportBug.ReportBug()
else:
allItemNumber = valueNo + 1
Dialogs.showState(translate("Amarok/Operations", "Changing Paths In Amarok Database"),
valueNo + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()
def changeTags(_values):
uni.startThreadAction()
allItemNumber = len(_values)
for valueNo, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
Commands.changeTag(value)
except:
ReportBug.ReportBug()
else:
allItemNumber = valueNo + 1
Dialogs.showState(translate("Amarok/Operations", "Changing Tags In Amarok Database"),
valueNo + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()
def changeArtistValues(_values):
uni.startThreadAction()
allItemNumber = len(_values)
Dialogs.showState(translate("Amarok/Operations", "Writing Music Tags"), 0, allItemNumber, True)
for x, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
musicFilePathAndArtist = Commands.changeArtistValue(value)
if musicFilePathAndArtist is not None:
artistName = musicFilePathAndArtist[0]
for musicFilePath in musicFilePathAndArtist[1]:
if fu.isWritableFileOrDir(musicFilePath, False, True):
Records.add(str(translate("Amarok/Operations", "File will be updated")), str(musicFilePath))
currentArtistName = ""
tagger = Taggers.getTagger()
if tagger is not None:
try:
tagger.loadFileForWrite(musicFilePath)
currentArtistName = tagger.getArtist()
except:
tagger.loadFileForWrite(musicFilePath)<|fim▁hole|> tagger.update()
Records.add(str(translate("Amarok/Operations", "Artist")), str(currentArtistName),
artistName)
for musicFilePath in musicFilePathAndArtist[2]:
if fu.isWritableFileOrDir(musicFilePath, False, True):
Records.add(str(translate("Amarok/Operations", "File will be updated")), str(musicFilePath))
currentArtistName = ""
tagger = Taggers.getTagger()
if tagger is not None:
try:
tagger.loadFileForWrite(musicFilePath)
currentArtistName = tagger.getAlbumArtist()
except:
tagger.loadFileForWrite(musicFilePath)
tagger.setAlbumArtist(artistName)
tagger.update()
Records.add(str(translate("Amarok/Operations", "albumArtist")), str(currentArtistName),
artistName)
except:
ReportBug.ReportBug()
else:
allItemNumber = x + 1
Dialogs.showState(translate("Amarok/Operations", "Writing Music Tags"), x + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()<|fim▁end|> | tagger.setArtist(artistName) |
<|file_name|>ScreenAction.java<|end_file_name|><|fim▁begin|>/*
* This file is part of Spoutcraft (http://wiki.getspout.org/).
*
* Spoutcraft is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Spoutcraft is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/<|fim▁hole|>public enum ScreenAction {
Open(0),
Close(1),
;
private final byte id;
ScreenAction(int id) {
this.id = (byte)id;
}
public int getId() {
return id;
}
public static ScreenAction getScreenActionFromId(int id) {
for (ScreenAction action : values()) {
if (action.getId() == id) {
return action;
}
}
return null;
}
}<|fim▁end|> | package org.getspout.spout.packet;
|
<|file_name|>luapat.rs<|end_file_name|><|fim▁begin|>// translation of Lua 5.2 string pattern code
use errors::*;
use std::ptr::null;
pub const LUA_MAXCAPTURES: usize = 32;
/* maximum recursion depth for 'match' */
const MAXCCALLS: usize = 200;
const L_ESC: u8 = b'%';
fn add(p: CPtr, count: usize) -> CPtr {
unsafe {p.offset(count as isize)}
}
fn sub(p: CPtr, count: usize) -> CPtr {
unsafe {p.offset(-(count as isize))}
}
fn next(p: CPtr) -> CPtr {
add(p, 1)
}
fn at(p: CPtr) -> u8 {
unsafe { *p }
}
fn diff(p1: CPtr, p2: CPtr) -> usize {
let d = (p1 as isize).wrapping_sub(p2 as isize);
d as usize
}
#[derive(Copy,Clone,Debug)]
pub struct LuaMatch {
pub start: usize,
pub end: usize,
}
#[derive(Copy,Clone)]
enum CapLen {
Len(usize),
Unfinished,
Position,
}
impl CapLen {
fn is_unfinished(&self) -> bool {
match *self {
CapLen::Unfinished => true,
_ => false
}
}
fn size(&self) -> Result<usize> {
match *self {
CapLen::Len(size) => Ok(size),
_ => error("capture was unfinished or positional")
}
}
}
type CPtr = *const u8;
#[derive(Copy,Clone)]
struct Capture {
init: CPtr,
len: CapLen,
}
impl Capture {
fn is_unfinished(&self) -> bool {
self.len.is_unfinished()
}
}
use std::result;
type Result<T> = result::Result<T,PatternError>;
fn error<T>(msg: &str) -> Result<T> {
Err(PatternError(msg.into()))
}
struct MatchState {
matchdepth: usize, /* control for recursive depth (to avoid stack overflow) */
src_init: CPtr, /* init of source string */
src_end: CPtr, /* end ('\0') of source string */
p_end: CPtr, /* end ('\0') of pattern */
level: usize, /* total number of captures (finished or unfinished) */
capture: [Capture; LUA_MAXCAPTURES],
}
impl MatchState {
fn new(s: CPtr, se: CPtr, pe: CPtr) -> MatchState {
MatchState {
matchdepth: MAXCCALLS,
src_init: s,
src_end: se,
p_end: pe,
level: 0,
capture: [Capture{init: null(), len: CapLen::Len(0) }; LUA_MAXCAPTURES],
}
}
fn check_capture(&self, l: usize) -> Result<usize> {
let l = l as i8 - b'1' as i8;
if l < 0 || l as usize >= self.level || self.capture[l as usize].is_unfinished() {
return error(&format!("invalid capture index %{}", l + 1));
}
Ok(l as usize)
}
fn capture_to_close(&self) -> Result<usize> {
let mut level = (self.level - 1) as isize;
while level >= 0 {
if self.capture[level as usize].is_unfinished() {
return Ok(level as usize);
}
level -= 1;
}
error("invalid pattern capture")
}
fn classend (&self, p: CPtr) -> Result<CPtr> {
let ch = at(p);
let mut next_p = next(p);
Ok(match ch {
L_ESC => {
if next_p == self.p_end {
return error("malformed pattern (ends with '%')");
}
next(next_p)
},
b'[' => {
if at(next_p) == b'^' {
next_p = next(next_p);
}
while at(next_p) != b']' {
if next_p == self.p_end {
return error("malformed pattern (missing ']')");
}
let ch = at(next_p);
next_p = next(next_p);
if ch == L_ESC && p < self.p_end {
next_p = next(next_p); /* skip escapes (e.g. `%]') */
}
}
next(next_p)
},
_ => next_p
})
}
}
fn match_class (ch: u8, class: u8) -> bool {
let res = match class.to_ascii_lowercase() {
b'a' => ch.is_ascii_alphabetic(),
b'c' => ch.is_ascii_control(),
b'd' => ch.is_ascii_digit(),
b'g' => ch.is_ascii_graphic(),
b'l' => ch.is_ascii_lowercase(),
b'p' => ch.is_ascii_punctuation(),
b's' => ch.is_ascii_whitespace(),
b'u' => ch.is_ascii_uppercase(),
b'w' => ch.is_ascii_alphanumeric(),
b'x' => ch.is_ascii_hexdigit(),
lc => return lc == ch,
};
    if class.is_ascii_lowercase() { res } else { !res }
}
fn matchbracketclass (c: u8, p: CPtr, ec: CPtr) -> bool {
let mut p = p;
// [^ inverts match
let sig = if at(next(p)) == b'^' {
p = next(p);
false
} else {
true
};
p = next(p);
while p < ec {
if at(p) == L_ESC { // e.g %s
p = next(p);
if match_class(c, at(p)) {
return sig;
}
} else
// e.g a-z
if at(next(p)) == b'-' && add(p,2) < ec {
let lastc = at(p);
p = add(p,2);
if lastc <= c && c <= at(p) {
return sig;
}
} else
if at(p) == c {
return sig;
}
p = next(p);
}
return ! sig;
}
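// Informal example of the behaviour above (not part of the original translation):
// for the pattern fragment "[%a_-]" with `p` at '[' and `ec` at the closing ']',
// matchbracketclass(b'x', p, ec) is true via the %a class, matchbracketclass(b'_', p, ec)
// is true as a literal, and a leading '^' inside the brackets would invert both results.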
impl MatchState {
fn singlematch (&self, s: CPtr, p: CPtr, ep: CPtr) -> bool {
if s >= self.src_end {
return false;
}
let c = at(s);
let pc = at(p);
match pc {
b'.' => true, /* matches any char */
L_ESC => match_class(c, at(next(p))),
b'[' => matchbracketclass(c, p, sub(ep,1)),
_ => c == pc
}
}
fn matchbalance (&self, s: CPtr, p: CPtr) -> Result<CPtr> {
if p >= sub(self.p_end,1) {
return error("malformed pattern (missing arguments to '%b')");
}
if at(s) != at(p) {
return Ok(null());
}
// e.g. %b()
let b = at(p);
let e = at(next(p));
let mut cont = 1;
let mut s = next(s);
while s < self.src_end {
let ch = at(s);
if ch == e {
cont -= 1;
if cont == 0 {
return Ok(next(s));
}
} else
if ch == b {
cont += 1;
}
s = next(s);
}
Ok(null()) /* string ends out of balance */
}
fn max_expand(&mut self, s: CPtr, p: CPtr, ep: CPtr) -> Result<CPtr> {
let mut i = 0isize; /* counts maximum expand for item */
while self.singlematch(add(s,i as usize),p,ep) {
i += 1;
}
/* keeps trying to match with the maximum repetitions */
while i >= 0 {
let res = self.patt_match(add(s,i as usize),next(ep))?;
if ! res.is_null() {
return Ok(res);
}
i -= 1; /* else didn't match; reduce 1 repetition to try again */
}
Ok(null())
}
fn min_expand(&mut self, s: CPtr, p: CPtr, ep: CPtr) -> Result<CPtr> {
let mut s = s;
loop {
let res = self.patt_match(s,next(ep))?;
if ! res.is_null() {
return Ok(res);
} else
if self.singlematch(s, p, ep) {
s = next(s);
} else {
return Ok(null());
}
}
}
fn start_capture(&mut self, s: CPtr, p: CPtr, what: CapLen) -> Result<CPtr> {
let level = self.level;
if level >= LUA_MAXCAPTURES {
return error("too many captures");
}
self.capture[level].init = s;
self.capture[level].len = what;
self.level = level + 1;
let res = self.patt_match(s, p)?;
if res.is_null() { /* match failed? */
self.level -= 1; /* undo capture */
}
Ok(res)
}
fn end_capture(&mut self, s: CPtr, p: CPtr) -> Result<CPtr> {
let l = self.capture_to_close()?;
self.capture[l].len = CapLen::Len(diff(s,self.capture[l].init)); /* close capture */
let res = self.patt_match(s, p)?;
if res.is_null() { /* match failed? */
self.capture[l].len = CapLen::Unfinished;
}
Ok(res)
}
    fn match_capture(&mut self, s: CPtr, l: usize) -> Result<CPtr> {
        let l = self.check_capture(l)?;
        let len = self.capture[l].len.size()?;
        if diff(self.src_end, s) >= len {
            // a back-reference (%1-%9) only matches when the bytes at `s` equal the
            // previously captured text, so compare the two ranges instead of copying
            let same = unsafe {
                std::slice::from_raw_parts(self.capture[l].init, len)
                    == std::slice::from_raw_parts(s, len)
            };
            if same {
                return Ok(add(s, len));
            }
        }
        Ok(null())
    }
fn patt_match(&mut self, s: CPtr, p: CPtr) -> Result<CPtr> {
let mut s = s;
let mut p = p;
self.matchdepth -= 1;
if self.matchdepth == 0 {
return error("pattern too complex");
}
if p == self.p_end { /* end of pattern? */
self.matchdepth += 1;
return Ok(s);
}
match at(p) {
b'(' => { /* start capture */
if at(next(p)) == b')' { /* position capture? */
s = self.start_capture(s, add(p,2), CapLen::Position)?;
} else {
s = self.start_capture(s, next(p), CapLen::Unfinished)?;
}
},
b')' => { /* end capture */
s = self.end_capture(s, next(p))?;
},
b'$' => {
if next(p) != self.p_end { /* is the `$' the last char in pattern? */
/* no; go to default */
return self.patt_default_match(s, p);
}
s = if s == self.src_end {s} else {null()}; /* check end of string */
}
L_ESC => { /* escaped sequences not in the format class[*+?-]? */
match at(next(p)) {
b'b' => { /* balanced string? */
s = self.matchbalance(s, add(p,2))?;
if ! s.is_null() {
// e.g, after %b()
return self.patt_match(s, add(p,4));
}
},
b'f' => { /* frontier? */
p = add(p,2);
if at(p) != b'[' {
return error("missing '[' after '%f' in pattern");
}
let ep = self.classend(p)?; /* points to what is next */
let previous = if s == self.src_init {b'\0'} else {at(sub(s,1))};
let epl = sub(ep,1);
if ! matchbracketclass(previous,p,epl)
&& matchbracketclass(at(s),p,epl) {
return self.patt_match(s, ep);
}
s = null(); /* match failed */
},
b'0'...b'9' => { /* capture results (%0-%9)? */
s = self.match_capture(s,at(next(p)) as usize)?;
if ! s.is_null() {
return self.patt_match(s, add(p,2));
}
},
_ => return self.patt_default_match(s, p)
}
},
_ => return self.patt_default_match(s, p)
}
self.matchdepth += 1;
Ok(s)
}
fn patt_default_match(&mut self, s: CPtr, p: CPtr) -> Result<CPtr> {
let mut s = s;
/* pattern class plus optional suffix */
let ep = self.classend(p)?; /* points to optional suffix */
/* does not match at least once? */
if ! self.singlematch(s, p, ep) {
let epc = at(ep);
if epc == b'*' || epc == b'?' || epc == b'-' { /* accept empty? */
return self.patt_match(s, next(ep));
} else { /* '+' or no suffix */
s = null(); /* fail */
}
} else { /* matched once */
match at(ep) { /* handle optional suffix */
b'?' => {
let res = self.patt_match(next(s),next(ep))?;
if ! res.is_null() {
s = res;
} else {
return self.patt_match(s, next(ep));
}
},
b'+' => { /* 1 or more repetitions */
s = next(s);
s = self.max_expand(s, p, ep)?;
},
b'*' => { /* 0 or more repetitions */
s = self.max_expand(s, p, ep)?;
},
b'-' => { /* 0 or more repetitions (minimum) */
s = self.min_expand(s, p, ep)? ;
},
_ => { /* no suffix */
return self.patt_match(next(s),ep);
}
}
}
self.matchdepth += 1;
Ok(s)
}
fn push_onecapture(&mut self, i: usize, s: CPtr, e: CPtr, mm: &mut [LuaMatch]) -> Result<()> {
if i >= self.level {
if i == 0 { /* ms->level == 0, too */
mm[0].start = 0;
mm[0].end = diff(e,s);
Ok(())
} else {
return error("invalid capture index");
}<|fim▁hole|> CapLen::Unfinished => error("unfinished capture"),
CapLen::Position => {
mm[i].start = diff(init,next(self.src_init));
mm[i].end = mm[i].start;
Ok(())
},
CapLen::Len(l) => {
mm[i].start = diff(init,self.src_init);
mm[i].end = mm[i].start + l;
Ok(())
}
}
}
}
fn push_captures(&mut self, s: CPtr, e: CPtr, mm: &mut [LuaMatch]) -> Result<usize> {
let nlevels = if self.level == 0 && ! s.is_null() {1} else {self.level};
for i in 0..nlevels {
self.push_onecapture(i, s, e, mm)?;
}
Ok(nlevels) /* number of strings pushed */
}
pub fn str_match_check(&mut self, p: CPtr) -> Result<()> {
let mut level_stack = [0; LUA_MAXCAPTURES];
let mut stack_idx = 0;
let mut p = p;
while p < self.p_end {
let ch = at(p);
p = next(p);
match ch {
L_ESC => {
//p = next(p);
let c = at(p);
match c {
b'b' => {
p = next(p);
if p >= self.p_end {
return error("malformed pattern (missing arguments to '%b')");
}
},
b'f' => {
p = next(p);
if at(p) != b'[' {
return error("missing '[' after '%f' in pattern");
}
p = sub(p,1); // so we see [...]
},
b'0' ... b'9' => {
let l = (c as i8) - (b'1' as i8);
println!("level {}", self.level);
if l < 0 || l as usize >= self.level || self.capture[l as usize].is_unfinished() {
return error(&format!("invalid capture index %{}", l + 1));
}
p = sub(p,1);
},
_ => {}
}
},
b'[' => {
while at(p) != b']' {
if p == self.p_end {
return error("malformed pattern (missing ']')");
}
if at(p) == L_ESC && p < self.p_end {
p = next(p);
}
p = next(p);
}
},
b'(' => {
if at(p) != b')' { // not a position capture
level_stack[stack_idx] = self.level;
stack_idx += 1;
self.capture[self.level].len = CapLen::Unfinished;
self.level += 1;
if self.level >= LUA_MAXCAPTURES {
return error("too many captures");
}
} else {
p = next(p);
}
},
b')' => {
if stack_idx == 0 {
return error("no open capture");
}
stack_idx -= 1;
self.capture[level_stack[stack_idx]].len = CapLen::Position;
},
_ => {}
}
}
if stack_idx > 0 {
return error("unfinished capture");
}
Ok(())
}
}
pub fn str_match(s: &[u8], p: &[u8], mm: &mut [LuaMatch]) -> Result<usize> {
let mut lp = p.len();
let mut p = p.as_ptr();
let ls = s.len();
let s = s.as_ptr();
let mut s1 = s;
let anchor = at(p) == b'^';
if anchor {
p = next(p);
lp -= 1; /* skip anchor character */
}
let mut ms = MatchState::new(s,add(s,ls),add(p,lp));
loop {
let res = ms.patt_match(s1, p)?;
if ! res.is_null() {
mm[0].start = diff(s1,s); /* start */
mm[0].end = diff(res,s); /* end */
return Ok(ms.push_captures(null(),null(),&mut mm[1..])? + 1);
}
s1 = next(s1);
if ! (s1 < ms.src_end && ! anchor) {
break;
}
}
Ok(0)
}
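// Sketch of how `str_match` is typically driven (assumed usage, in the spirit of
// the commented-out `check` helper at the bottom of this file):
//
//   let mut mm = [LuaMatch { start: 0, end: 0 }; LUA_MAXCAPTURES];
//   let n = str_match(b"hello dolly", b"(%a+) (%a+)", &mut mm)?;
//   // n == 3: mm[0] spans the whole match, mm[1] and mm[2] are the two captures,
//   // each expressed as byte offsets into the subject string.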
pub fn str_check(p: &[u8]) -> Result<()> {
let mut lp = p.len();
let mut p = p.as_ptr();
let anchor = at(p) == b'^';
if anchor {
p = next(p);
lp -= 1; /* skip anchor character */
}
let mut ms = MatchState::new(null(),null(),add(p,lp));
if at(sub(ms.p_end,1)) == b'%' {
return error("malformed pattern (ends with '%')");
}
ms.str_match_check(p)?;
Ok(())
}
/*
fn check(s: &[u8], p: &[u8]) {
if let Err(e) = str_check(p) {
println!("check error {}",e);
return;
}
let mut matches = [LuaMatch{start: 0, end: 0}; 10];
match str_match(s, p, &mut matches) {
Ok(n) => {
println!("ok {} matches", n);
for i in 0..n {
println!("match {:?} {:?}",
matches[i],
String::from_utf8(s[matches[i].start .. matches[i].end].to_vec())
);
}
},
Err(e) => {
println!("error: {}", e)
}
}
}
fn main() {
let mut args = std::env::args().skip(1);
let pat = args.next().unwrap();
let s = args.next().unwrap();
check(s.as_bytes(), pat.as_bytes());
//~ check(b"hello",b"%a");
//~ check(b"0hello",b"%a+");
//~ check(b"hello",b"%l(%a)");
//check(b"hello",b"he(l+)");
//check(b"k {and {so}}",b"k%s+(%b{})");
}
*/<|fim▁end|> | } else {
let init = self.capture[i].init;
match self.capture[i].len { |
<|file_name|>koa__cors-tests.ts<|end_file_name|><|fim▁begin|>import Koa = require('koa');
import cors = require('@koa/cors');
<|fim▁hole|><|fim▁end|> | const app = new Koa();
app.use(cors()); |
<|file_name|>fielderrors.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fielderrors
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/util/errors"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
)
// ValidationErrorType is a machine readable value providing more detail about why
// a field is invalid. These values are expected to match 1-1 with
// CauseType in api/types.go.
type ValidationErrorType string
// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it.
const (
// ValidationErrorTypeNotFound is used to report failure to find a requested value
// (e.g. looking up an ID).
ValidationErrorTypeNotFound ValidationErrorType = "FieldValueNotFound"
// ValidationErrorTypeRequired is used to report required values that are not
// provided (e.g. empty strings, null values, or empty arrays).
ValidationErrorTypeRequired ValidationErrorType = "FieldValueRequired"
// ValidationErrorTypeDuplicate is used to report collisions of values that must be
// unique (e.g. unique IDs).
ValidationErrorTypeDuplicate ValidationErrorType = "FieldValueDuplicate"
// ValidationErrorTypeInvalid is used to report malformed values (e.g. failed regex
// match).
ValidationErrorTypeInvalid ValidationErrorType = "FieldValueInvalid"
// ValidationErrorTypeNotSupported is used to report valid (as per formatting rules)
// values that can not be handled (e.g. an enumerated string).
ValidationErrorTypeNotSupported ValidationErrorType = "FieldValueNotSupported"
// ValidationErrorTypeForbidden is used to report valid (as per formatting rules)
// values which would be accepted by some api instances, but which would invoke behavior
// not permitted by this api instance (such as due to stricter security policy).
ValidationErrorTypeForbidden ValidationErrorType = "FieldValueForbidden"
// ValidationErrorTypeTooLong is used to report that given value is too long.
ValidationErrorTypeTooLong ValidationErrorType = "FieldValueTooLong"
)
// String converts a ValidationErrorType into its corresponding error message.
func (t ValidationErrorType) String() string {
switch t {
case ValidationErrorTypeNotFound:
return "not found"
case ValidationErrorTypeRequired:
return "required value"
case ValidationErrorTypeDuplicate:
return "duplicate value"
case ValidationErrorTypeInvalid:
return "invalid value"
case ValidationErrorTypeNotSupported:
return "unsupported value"
case ValidationErrorTypeForbidden:
return "forbidden"
case ValidationErrorTypeTooLong:
return "too long"
default:
glog.Errorf("unrecognized validation type: %#v", t)
return ""
}
}
// ValidationError is an implementation of the 'error' interface, which represents an error of validation.
type ValidationError struct {
Type ValidationErrorType
Field string
BadValue interface{}
Detail string
}
var _ error = &ValidationError{}
func (v *ValidationError) Error() string {
return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
}
func (v *ValidationError) ErrorBody() string {
var s string
switch v.Type {
case ValidationErrorTypeRequired, ValidationErrorTypeTooLong:
s = spew.Sprintf("%s", v.Type)
default:
s = spew.Sprintf("%s '%+v'", v.Type, v.BadValue)
}
if len(v.Detail) != 0 {
s += fmt.Sprintf(", Details: %s", v.Detail)
}
return s
}
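// Illustrative example (not part of the original package): a field error such as
//
//	NewFieldInvalid("spec.replicas", -1, "must be non-negative").Error()
//
// renders roughly as `spec.replicas: invalid value '-1', Details: must be non-negative`.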
// NewFieldRequired returns a *ValidationError indicating "value required"
func NewFieldRequired(field string) *ValidationError {
return &ValidationError{ValidationErrorTypeRequired, field, "", ""}
}
// NewFieldInvalid returns a *ValidationError indicating "invalid value"
func NewFieldInvalid(field string, value interface{}, detail string) *ValidationError {
return &ValidationError{ValidationErrorTypeInvalid, field, value, detail}
}
// NewFieldValueNotSupported returns a *ValidationError indicating "unsupported value"
func NewFieldValueNotSupported(field string, value interface{}, validValues []string) *ValidationError {
detail := ""
if validValues != nil && len(validValues) > 0 {
detail = "supported values: " + strings.Join(validValues, ", ")
}
return &ValidationError{ValidationErrorTypeNotSupported, field, value, detail}
}
// NewFieldForbidden returns a *ValidationError indicating "forbidden"
func NewFieldForbidden(field string, value interface{}) *ValidationError {
return &ValidationError{ValidationErrorTypeForbidden, field, value, ""}
}
// NewFieldDuplicate returns a *ValidationError indicating "duplicate value"
func NewFieldDuplicate(field string, value interface{}) *ValidationError {
return &ValidationError{ValidationErrorTypeDuplicate, field, value, ""}
}
// NewFieldNotFound returns a *ValidationError indicating "value not found"
func NewFieldNotFound(field string, value interface{}) *ValidationError {<|fim▁hole|> return &ValidationError{ValidationErrorTypeTooLong, field, value, fmt.Sprintf("must have at most %d characters", maxLength)}
}
type ValidationErrorList []error
// Prefix adds a prefix to the Field of every ValidationError in the list.
// Returns the list for convenience.
func (list ValidationErrorList) Prefix(prefix string) ValidationErrorList {
for i := range list {
if err, ok := list[i].(*ValidationError); ok {
if strings.HasPrefix(err.Field, "[") {
err.Field = prefix + err.Field
} else if len(err.Field) != 0 {
err.Field = prefix + "." + err.Field
} else {
err.Field = prefix
}
list[i] = err
} else {
glog.Warningf("Programmer error: ValidationErrorList holds non-ValidationError: %#v", list[i])
}
}
return list
}
// PrefixIndex adds an index to the Field of every ValidationError in the list.
// Returns the list for convenience.
func (list ValidationErrorList) PrefixIndex(index int) ValidationErrorList {
return list.Prefix(fmt.Sprintf("[%d]", index))
}
// NewValidationErrorTypeMatcher returns an errors.Matcher that returns true
// if the provided error is a ValidationError and has the provided ValidationErrorType.
func NewValidationErrorTypeMatcher(t ValidationErrorType) errors.Matcher {
return func(err error) bool {
if e, ok := err.(*ValidationError); ok {
return e.Type == t
}
return false
}
}
// NewValidationErrorFieldPrefixMatcher returns an errors.Matcher that returns true
// if the provided error is a ValidationError and has a field with the provided
// prefix.
func NewValidationErrorFieldPrefixMatcher(prefix string) errors.Matcher {
return func(err error) bool {
if e, ok := err.(*ValidationError); ok {
return strings.HasPrefix(e.Field, prefix)
}
return false
}
}
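// Hedged usage sketch (hypothetical variable, not part of the original file): the
// matchers above are meant to be combined with Filter, e.g.
//
//	remaining := errs.Filter(
//		NewValidationErrorTypeMatcher(ValidationErrorTypeRequired),
//		NewValidationErrorFieldPrefixMatcher("metadata."),
//	)
//
// which drops every "required value" error and every error under the metadata prefix.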
// Filter removes items from the ValidationErrorList that match the provided fns.
func (list ValidationErrorList) Filter(fns ...errors.Matcher) ValidationErrorList {
err := errors.FilterOut(errors.NewAggregate(list), fns...)
if err == nil {
return nil
}
// FilterOut that takes an Aggregate returns an Aggregate
agg := err.(errors.Aggregate)
return ValidationErrorList(agg.Errors())
}<|fim▁end|> | return &ValidationError{ValidationErrorTypeNotFound, field, value, ""}
}
func NewFieldTooLong(field string, value interface{}, maxLength int) *ValidationError { |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for grasp 0.6
// Project: http://graspjs.com
// Definitions by: Isaac Wolkerstorfer <https://github.com/agnoster>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.1
/// <reference types="node" />
import * as fs from "fs";
import * as cliColor from "cli-color";
// Though Grasp does not use esprima, the parser it uses (flow-parser) has a compatible Node type and existing typedefs
import { Node } from "estree";
export = grasp;
declare function grasp(options: {
args: string[] | Record<string, any> | string;
error?: (message: string) => void;
callback?: (result: string) => void;
exit?: (code: number) => void;
input?: string;
// The following are "overrides" for defaults, such as console, cli-color,
// process.stdin, or fs. In most cases grasp only makes use of a small
// surface area of the types, but I think if someone really wants to pass a
// fake fs that only implements the subset of methods they think grasp uses,
// it should be up to them to use a cast rather than the typedef to be
// overly lenient. We never know if grasp might change their internal use of
// fs, after all.
fs?: typeof fs;
console?: typeof console;
textFormat?: typeof cliColor;
stdin?: typeof process.stdin;
}): void;
declare namespace grasp {
const VERSION: string;
// Both the search and replace functions are curryable, which leads to quite
// a bit of noise. Using generic currying will discard variable name
// information, so we hand-roll it here
function search(engine: QueryEngineType, selector: string, input: string): Node[];
function search(engine: QueryEngineType, selector: string): (input: string) => Node[];
function search(engine: QueryEngineType): GraspSearchWithQueryEngine;
function replace(engine: QueryEngineType, selector: string, replacement: Replacement, input: string): string;
function replace(engine: QueryEngineType, selector: string, replacement: Replacement): (input: string) => string;
function replace(engine: QueryEngineType, selector: string): GraspReplaceWithSelector;
function replace(engine: QueryEngineType): GraspReplaceWithQueryEngine;
type QueryEngineType = "squery" | "equery";
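    // Rough usage sketch of the curried forms (assumed example, not part of the
    // published typings):
    //
    //   const findIfs = grasp.search('squery', 'if');   // (input: string) => Node[]
    //   const nodes = findIfs('if (x) { y(); }');
    //
    // Each partial application narrows to the corresponding *With* helper type.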
type Replacement =
| string
| ((
getRaw: (node: Node) => string,
node: Node,
query: (q: string) => Node[],
named: { [key: string]: string | Node }
) => string);
<|fim▁hole|>
type GraspReplaceWithQueryEngine = ((selector: string) => GraspReplaceWithSelector) &
((selector: string, replacement: Replacement) => (input: string) => string) &
((selector: string, replacement: Replacement, input: string) => string);
type GraspReplaceWithSelector = ((replacement: Replacement) => (input: string) => string) &
((replacement: Replacement, input: string) => string);
}<|fim▁end|> | type GraspSearchWithQueryEngine = ((selector: string, input: string) => Node[]) &
((selector: string) => (input: string) => Node[]); |
<|file_name|>feedfetcher.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"feed fetcher"
from db import MySQLDatabase
from fetcher import FeedFetcher
def main():
db = MySQLDatabase()
fetcher = FeedFetcher()
feeds = db.get_feeds(offset=0, limit=10)
read_count = 10
while len(feeds) > 0:
for feed in feeds:
fid = feed[0]
url = feed[1]
title = feed[2]
print "fetching #{0}: {1}".format(fid, url)
entries = fetcher.fetch(url)
for entry in entries:
entry.feed_id = fid
try:
print "insert {0}".format(entry.url)
except UnicodeEncodeError:<|fim▁hole|> print "insert {0}".format(entry.url.encode('utf-8'))
db.append_feed_content(entry)
feeds = db.get_feeds(offset=read_count, limit=10)
read_count += 10
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>Uploader.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | //>>built
define("dojox/form/nls/el/Uploader",{label:"\u0395\u03c0\u03b9\u03bb\u03bf\u03b3\u03ae \u03b1\u03c1\u03c7\u03b5\u03af\u03c9\u03bd..."}); |
<|file_name|>test_buffers.py<|end_file_name|><|fim▁begin|>"""Test kytos.core.buffers module."""
import asyncio
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kytos.core.buffers import KytosBuffers, KytosEventBuffer
# pylint: disable=protected-access
class TestKytosEventBuffer(TestCase):
"""KytosEventBuffer tests."""
def setUp(self):
"""Instantiate a KytosEventBuffer."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.kytos_event_buffer = KytosEventBuffer('name', loop=self.loop)
@staticmethod
def create_event_mock(name='any'):
"""Create a new event mock."""
event = MagicMock()
event.name = name
return event
def test_put_get(self):
"""Test put and get methods."""
event = self.create_event_mock()
self.kytos_event_buffer.put(event)
queue_event = self.kytos_event_buffer.get()
self.assertEqual(queue_event, event)
def test_put__shutdown(self):
"""Test put method to shutdown event."""
event = self.create_event_mock('kytos/core.shutdown')
self.kytos_event_buffer.put(event)
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aput(self):
"""Test aput async method."""
event = MagicMock()
event.name = 'kytos/core.shutdown'
self.loop.run_until_complete(self.kytos_event_buffer.aput(event))
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aget(self):
"""Test aget async method."""
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
expected = self.loop.run_until_complete(self.kytos_event_buffer.aget())
self.assertEqual(event, expected)
@patch('janus._SyncQueueProxy.task_done')
def test_task_done(self, mock_task_done):
"""Test task_done method."""
self.kytos_event_buffer.task_done()
mock_task_done.assert_called()
@patch('janus._SyncQueueProxy.join')
def test_join(self, mock_join):
"""Test join method."""
self.kytos_event_buffer.join()
mock_join.assert_called()
def test_qsize(self):
"""Test qsize method to empty and with one event in query."""
qsize_1 = self.kytos_event_buffer.qsize()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
qsize_2 = self.kytos_event_buffer.qsize()
self.assertEqual(qsize_1, 0)
self.assertEqual(qsize_2, 1)
def test_empty(self):
"""Test empty method to empty and with one event in query."""
empty_1 = self.kytos_event_buffer.empty()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
empty_2 = self.kytos_event_buffer.empty()
self.assertTrue(empty_1)
self.assertFalse(empty_2)
<|fim▁hole|> mock_full.side_effect = [False, True]
full_1 = self.kytos_event_buffer.full()
full_2 = self.kytos_event_buffer.full()
self.assertFalse(full_1)
self.assertTrue(full_2)
class TestKytosBuffers(TestCase):
"""KytosBuffers tests."""
def setUp(self):
"""Instantiate a KytosBuffers."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.kytos_buffers = KytosBuffers(loop=self.loop)
def test_send_stop_signal(self):
"""Test send_stop_signal method."""
self.kytos_buffers.send_stop_signal()
self.assertTrue(self.kytos_buffers.raw._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_in._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_out._reject_new_events)
self.assertTrue(self.kytos_buffers.app._reject_new_events)<|fim▁end|> | @patch('janus._SyncQueueProxy.full')
def test_full(self, mock_full):
"""Test full method to full and not full query.""" |
<|file_name|>webpack.config.js<|end_file_name|><|fim▁begin|>const path = require('path');
require('dotenv').config();
const HtmlWebpackPlugin = require('html-webpack-plugin');
const CopyWebpackPlugin = require('copy-webpack-plugin');
const ExtractTextPlugin = require('extract-text-webpack-plugin');
const { AureliaPlugin } = require('aurelia-webpack-plugin');
const { optimize: { CommonsChunkPlugin }, ProvidePlugin } = require('webpack')
const { TsConfigPathsPlugin, CheckerPlugin } = require('awesome-typescript-loader');
var BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin;
const UglifyJSPlugin = require('uglifyjs-webpack-plugin')
var BrowserSyncPlugin = require('browser-sync-webpack-plugin');
// config helpers:
const ensureArray = (config) => config && (Array.isArray(config) ? config : [config]) || []
const when = (condition, config, negativeConfig) =>
condition ? ensureArray(config) : ensureArray(negativeConfig)
// primary config:
const title = 'TechRadar';
const outDir = path.resolve(__dirname, 'dist');
const srcDir = path.resolve(__dirname, 'src');
const nodeModulesDir = path.resolve(__dirname, 'node_modules');
const baseUrl = '/';
const cssRules = [
{ loader: 'css-loader' },
{
loader: 'postcss-loader',
options: { plugins: () => [require('autoprefixer')({ browsers: ['last 2 versions'] })] }
}
]
module.exports = ({ production, server, extractCss, coverage } = {}) => ({
resolve: {
extensions: ['.ts', '.js'],
modules: [srcDir, 'node_modules'],
alias: {
'aurelia-binding$': path.resolve(__dirname, 'node_modules/aurelia-binding/dist/amd/aurelia-binding.js')
}
},
entry: {
app: ['./src/main']
},
output: {
path: outDir,
publicPath: baseUrl,
filename: production ? '[name].[chunkhash].bundle.js' : '[name].[hash].bundle.js',
sourceMapFilename: production ? '[name].[chunkhash].bundle.map' : '[name].[hash].bundle.map',
chunkFilename: production ? '[chunkhash].chunk.js' : '[hash].chunk.js',
},
devServer: {
contentBase: baseUrl,
},
module: {
rules: [
// CSS required in JS/TS files should use the style-loader that auto-injects it into the website
// only when the issuer is a .js/.ts file, so the loaders are not applied inside html templates
{
test: /\.css$/i,
issuer: [{ not: [{ test: /\.html$/i }] }],
use: extractCss ? ExtractTextPlugin.extract({
fallback: 'style-loader',
use: cssRules,
}) : ['style-loader', ...cssRules],
},
{
test: /\.css$/i,
issuer: [{ test: /\.html$/i }],
// CSS required in templates cannot be extracted safely
// because Aurelia would try to require it again in runtime
use: cssRules,
},
{ test: /\.html$/i, loader: 'html-loader' },
{ test: /\.ts$/i, loader: 'awesome-typescript-loader', exclude: nodeModulesDir },
{ test: /\.json$/i, loader: 'json-loader' },
// use Bluebird as the global Promise implementation:
{ test: /[\/\\]node_modules[\/\\]bluebird[\/\\].+\.js$/, loader: 'expose-loader?Promise' },
// exposes jQuery globally as $ and as jQuery:
{ test: require.resolve('jquery'), loader: 'expose-loader?$!expose-loader?jQuery' },
// embed small images and fonts as Data Urls and larger ones as files:
{ test: /\.(png|gif|jpg|cur)$/i, loader: 'url-loader', options: { limit: 8192 } },
{ test: /\.woff2(\?v=[0-9]\.[0-9]\.[0-9])?$/i, loader: 'url-loader', options: { limit: 10000, mimetype: 'application/font-woff2' } },
{ test: /\.woff(\?v=[0-9]\.[0-9]\.[0-9])?$/i, loader: 'url-loader', options: { limit: 10000, mimetype: 'application/font-woff' } },
// load these fonts normally, as files:
{ test: /\.(ttf|eot|svg|otf)(\?v=[0-9]\.[0-9]\.[0-9])?$/i, loader: 'file-loader' },
...when(coverage, {
test: /\.[jt]s$/i, loader: 'istanbul-instrumenter-loader',
include: srcDir, exclude: [/\.{spec,test}\.[jt]s$/i],
enforce: 'post', options: { esModules: true },
})
]
},
plugins: [
new BrowserSyncPlugin({
// browse to http://localhost:3000/ during development,
// ./public directory is being served
host: 'localhost',
port: 3000,
server: { baseDir: ['dist'] }
}),
new AureliaPlugin(),
new ProvidePlugin({
'Promise': 'bluebird',
'$': 'jquery',
'jQuery': 'jquery',
'window.jQuery': 'jquery',
}),
new TsConfigPathsPlugin(),
new CheckerPlugin(),
new HtmlWebpackPlugin({
template: 'index.ejs',
minify: production ? {
removeComments: true,
collapseWhitespace: true
} : undefined,
metadata: {
// available in index.ejs //
title, server, baseUrl
},
}),
// new UglifyJSPlugin(),
new CopyWebpackPlugin([
{ from: 'static/favicon.ico', to: 'favicon.ico' }
,{ from: './../tr-host/projects', to: 'projects' }
,{ from: 'img', to: 'img' }
]),
...when(extractCss, new ExtractTextPlugin({
filename: production ? '[contenthash].css' : '[id].css',
allChunks: true,
})),
...when(production, new CommonsChunkPlugin({
name: ['vendor']
})),
...when(production, new CopyWebpackPlugin([
{ from: 'static/favicon.ico', to: 'favicon.ico' }
]))
// ,
// new BundleAnalyzerPlugin({
// // Can be `server`, `static` or `disabled`.
// // In `server` mode analyzer will start HTTP server to show bundle report.
// // In `static` mode single HTML file with bundle report will be generated.
// // In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`.
// analyzerMode: 'static',
// // Host that will be used in `server` mode to start HTTP server.
// analyzerHost: '127.0.0.1',
// // Port that will be used in `server` mode to start HTTP server.
// analyzerPort: 8888,
// // Path to bundle report file that will be generated in `static` mode.
// // Relative to bundles output directory.
// reportFilename: 'report.html',
// // Module sizes to show in report by default.
// // Should be one of `stat`, `parsed` or `gzip`.
// // See "Definitions" section for more information.
// defaultSizes: 'parsed',<|fim▁hole|> // // Automatically open report in default browser
// openAnalyzer: false,
// // If `true`, Webpack Stats JSON file will be generated in bundles output directory
// generateStatsFile: false,
// // Name of Webpack Stats JSON file that will be generated if `generateStatsFile` is `true`.
// // Relative to bundles output directory.
// statsFilename: 'stats.json',
// // Options for `stats.toJson()` method.
// // For example you can exclude sources of your modules from stats file with `source: false` option.
// // See more options here: https://github.com/webpack/webpack/blob/webpack-1/lib/Stats.js#L21
// statsOptions: null,
// // Log level. Can be 'info', 'warn', 'error' or 'silent'.
// logLevel: 'info'
// })
],
})<|fim▁end|> | |
<|file_name|>instr_vcvtps2uqq.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;<|fim▁hole|>fn vcvtps2uqq_1() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM4)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 241, 125, 141, 121, 212], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_2() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledDisplaced(ECX, Four, 782247532, Some(OperandSize::Qword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 241, 125, 143, 121, 52, 141, 108, 38, 160, 46], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_3() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(XMM8)), operand2: Some(Direct(XMM27)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 17, 125, 142, 121, 195], OperandSize::Qword)
}
#[test]
fn vcvtps2uqq_4() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(XMM7)), operand2: Some(IndirectScaledIndexed(RDI, RSI, Two, Some(OperandSize::Qword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: None }, &[98, 241, 125, 137, 121, 60, 119], OperandSize::Qword)
}
#[test]
fn vcvtps2uqq_5() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(YMM4)), operand2: Some(Direct(XMM6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 241, 125, 171, 121, 230], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_6() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(YMM0)), operand2: Some(IndirectScaledDisplaced(ESI, Four, 1742127559, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 241, 125, 170, 121, 4, 181, 199, 193, 214, 103], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_7() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(YMM12)), operand2: Some(Direct(XMM24)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 17, 125, 171, 121, 224], OperandSize::Qword)
}
#[test]
fn vcvtps2uqq_8() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(YMM21)), operand2: Some(IndirectScaledDisplaced(RAX, Eight, 796565832, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 225, 125, 173, 121, 44, 197, 72, 161, 122, 47], OperandSize::Qword)
}
#[test]
fn vcvtps2uqq_9() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(ZMM0)), operand2: Some(Direct(YMM5)), operand3: None, operand4: None, lock: false, rounding_mode: Some(RoundingMode::Zero), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 241, 125, 251, 121, 197], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_10() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(ZMM5)), operand2: Some(IndirectDisplaced(EAX, 1019708652, Some(OperandSize::Ymmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 241, 125, 202, 121, 168, 236, 132, 199, 60], OperandSize::Dword)
}
#[test]
fn vcvtps2uqq_11() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(ZMM27)), operand2: Some(Direct(YMM21)), operand3: None, operand4: None, lock: false, rounding_mode: Some(RoundingMode::Nearest), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 33, 125, 158, 121, 221], OperandSize::Qword)
}
#[test]
fn vcvtps2uqq_12() {
run_test(&Instruction { mnemonic: Mnemonic::VCVTPS2UQQ, operand1: Some(Direct(ZMM25)), operand2: Some(IndirectDisplaced(RDI, 1515537008, Some(OperandSize::Ymmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 97, 125, 203, 121, 143, 112, 66, 85, 90], OperandSize::Qword)
}<|fim▁end|> | use ::RegScale::*;
use ::test::run_test;
#[test] |
<|file_name|>builder.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
import shutil
from jinja2 import Environment, FileSystemLoader
from webassets import Environment as AssetsEnvironment
from webassets.ext.jinja2 import AssetsExtension<|fim▁hole|>class TemplateBuilder(object):
def __init__(self, path, output,
static_path='static', static_url='static',
asset_config='config.yml'):
self.path = path
self.output = output
self.output_path = os.path.join(path, output)
self.env = Environment(loader=FileSystemLoader(path),
extensions=[AssetsExtension])
try:
config_path = os.path.join(self.path, asset_config)
asset_config = YAMLLoader(config_path)
self.assets_env = asset_config.load_environment()
except IOError:
self.assets_env = AssetsEnvironment()
if 'directory' not in self.assets_env.config:
self.assets_env.directory = self.output_path
if 'url' not in self.assets_env.config:
self.assets_env.url = static_url
self.assets_env.load_path = [self.path]
self.env.assets_environment = self.assets_env
def build_template(self, template, context={}):
tmpl = self.env.get_template(template)
dump_path = os.path.join(self.output_path, template)
tmpl.stream().dump(dump_path)
def list_files(self):
templates, other = set(), set()
if getattr(self.assets_env, '_named_bundles', None):
bundles = [fp for name, bundle in self.assets_env._named_bundles.iteritems()
for fp in bundle.contents]
else:
bundles = []
for dirpath, dirnames, filenames in os.walk(self.path):
for filename in filenames:
filepath = os.path.join(dirpath, filename) \
[len(self.path):].strip(os.path.sep).replace(os.path.sep, '/')
if filepath[:2] == './':
filepath = filepath[2:]
if self.output in filepath or filepath in bundles:
continue
elif '.html' in filepath:
templates.add(filepath)
else:
other.add(filepath)
return sorted(templates), sorted(bundles), sorted(other)
class SiteBuilder(object):
def __init__(self, path, output='public', tmpl_builder_class=TemplateBuilder, **kwargs):
self.path = path
self.output_path = os.path.join(path, output)
self.tmpl_builder = tmpl_builder_class(self.path, output, **kwargs)
def build(self):
if not os.path.exists(self.output_path):
os.mkdir(self.output_path)
templates, bundles, others = self.tmpl_builder.list_files()
for template in templates:
# XXX: for now we are not handling contexts
self.tmpl_builder.build_template(template)
for other in others:
dirname = os.path.join(self.output_path, os.path.dirname(other))
if not os.path.exists(dirname):
os.makedirs(dirname)
shutil.copyfile(os.path.join(self.path, other), os.path.join(self.output_path, other))<|fim▁end|> | from webassets.loaders import YAMLLoader
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Blueprint<|fim▁hole|><|fim▁end|> |
error = Blueprint('error', __name__, )
from . import view |
<|file_name|>result.rs<|end_file_name|><|fim▁begin|>fn main() {
let a = String::from("hello");
run1(&a).unwrap();
run2(&a).unwrap();
}<|fim▁hole|> Err(x) => return Err(x),
};
let c = match bar(a) {
Ok(x) => x,
Err(x) => return Err(x),
};
let d = match baz(&b, c) {
Ok(x) => x,
Err(x) => return Err(x),
};
println!("{:?}", d);
Ok(())
}
fn run2(a: &String) -> Result<(), String> {
let b = foo(a)?;
let c = bar(a)?;
let d = baz(&b, c)?;
println!("{:?}", d);
Ok(())
}
fn foo(s: &String) -> Result<&String, String> {
if s.is_empty() {
Err(String::from("foo"))
} else {
Ok(s)
}
}
fn bar(s: &String) -> Result<i32, String> {
if s.is_empty() {
Err(String::from("bar"))
} else {
Ok(s.len() as i32)
}
}
fn baz(s: &String, i: i32) -> Result<(&String, i32), String> {
if s.is_empty() || i == 0 {
Err(String::from("baz"))
} else {
Ok((s, i))
}
}<|fim▁end|> |
fn run1(a: &String) -> Result<(), String> {
let b = match foo(a) {
Ok(x) => x, |
<|file_name|>dev_poll_reactor.hpp<|end_file_name|><|fim▁begin|>//
// detail/dev_poll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2013 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_DETAIL_DEV_POLL_REACTOR_HPP
#define BOOST_ASIO_DETAIL_DEV_POLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_DEV_POLL)
#include <cstddef>
#include <vector>
#include <sys/devpoll.h>
#include <boost/asio/detail/dev_poll_reactor_fwd.hpp>
#include <boost/asio/detail/hash_map.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/op_queue.hpp>
#include <boost/asio/detail/reactor_op.hpp>
#include <boost/asio/detail/reactor_op_queue.hpp>
#include <boost/asio/detail/select_interrupter.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/timer_queue_base.hpp>
#include <boost/asio/detail/timer_queue_fwd.hpp>
#include <boost/asio/detail/timer_queue_set.hpp>
#include <boost/asio/detail/wait_op.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/push_options.hpp><|fim▁hole|>
class dev_poll_reactor
: public boost::asio::detail::service_base<dev_poll_reactor>
{
public:
enum op_types { read_op = 0, write_op = 1,
connect_op = 1, except_op = 2, max_ops = 3 };
// Per-descriptor data.
struct per_descriptor_data
{
};
// Constructor.
BOOST_ASIO_DECL dev_poll_reactor(boost::asio::io_service& io_service);
// Destructor.
BOOST_ASIO_DECL ~dev_poll_reactor();
// Destroy all user-defined handler objects owned by the service.
BOOST_ASIO_DECL void shutdown_service();
// Recreate internal descriptors following a fork.
BOOST_ASIO_DECL void fork_service(
boost::asio::io_service::fork_event fork_ev);
// Initialise the task.
BOOST_ASIO_DECL void init_task();
// Register a socket with the reactor. Returns 0 on success, system error
// code on failure.
BOOST_ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&);
// Register a descriptor with an associated single operation. Returns 0 on
// success, system error code on failure.
BOOST_ASIO_DECL int register_internal_descriptor(
int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op);
// Move descriptor registration from one descriptor_data object to another.
BOOST_ASIO_DECL void move_descriptor(socket_type descriptor,
per_descriptor_data& target_descriptor_data,
per_descriptor_data& source_descriptor_data);
// Post a reactor operation for immediate completion.
void post_immediate_completion(reactor_op* op, bool is_continuation)
{
io_service_.post_immediate_completion(op, is_continuation);
}
// Start a new operation. The reactor operation will be performed when the
// given descriptor is flagged as ready, or an error has occurred.
BOOST_ASIO_DECL void start_op(int op_type, socket_type descriptor,
per_descriptor_data&, reactor_op* op,
bool is_continuation, bool allow_speculative);
// Cancel all operations associated with the given descriptor. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.
BOOST_ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&);
// Cancel any operations that are running against the descriptor and remove
// its registration from the reactor.
BOOST_ASIO_DECL void deregister_descriptor(socket_type descriptor,
per_descriptor_data&, bool closing);
// Cancel any operations that are running against the descriptor and remove
// its registration from the reactor.
BOOST_ASIO_DECL void deregister_internal_descriptor(
socket_type descriptor, per_descriptor_data&);
// Add a new timer queue to the reactor.
template <typename Time_Traits>
void add_timer_queue(timer_queue<Time_Traits>& queue);
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void remove_timer_queue(timer_queue<Time_Traits>& queue);
// Schedule a new operation in the given timer queue to expire at the
// specified absolute time.
template <typename Time_Traits>
void schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op);
// Cancel the timer operations associated with the given token. Returns the
// number of operations that have been posted or dispatched.
template <typename Time_Traits>
std::size_t cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());
// Run /dev/poll once until interrupted or events are ready to be dispatched.
BOOST_ASIO_DECL void run(bool block, op_queue<operation>& ops);
// Interrupt the select loop.
BOOST_ASIO_DECL void interrupt();
private:
// Create the /dev/poll file descriptor. Throws an exception if the descriptor
// cannot be created.
BOOST_ASIO_DECL static int do_dev_poll_create();
// Helper function to add a new timer queue.
BOOST_ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);
// Helper function to remove a timer queue.
BOOST_ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);
// Get the timeout value for the /dev/poll DP_POLL operation. The timeout
// value is returned as a number of milliseconds. A return value of -1
// indicates that the poll should block indefinitely.
BOOST_ASIO_DECL int get_timeout();
// Cancel all operations associated with the given descriptor. The do_cancel
// function of the handler objects will be invoked. This function does not
// acquire the dev_poll_reactor's mutex.
BOOST_ASIO_DECL void cancel_ops_unlocked(socket_type descriptor,
const boost::system::error_code& ec);
// Helper class used to reregister descriptors after a fork.
class fork_helper;
friend class fork_helper;
// Add a pending event entry for the given descriptor.
BOOST_ASIO_DECL ::pollfd& add_pending_event_change(int descriptor);
// The io_service implementation used to post completions.
io_service_impl& io_service_;
// Mutex to protect access to internal data.
boost::asio::detail::mutex mutex_;
// The /dev/poll file descriptor.
int dev_poll_fd_;
// Vector of /dev/poll events waiting to be written to the descriptor.
std::vector< ::pollfd> pending_event_changes_;
// Hash map to associate a descriptor with a pending event change index.
hash_map<int, std::size_t> pending_event_change_index_;
// The interrupter is used to break a blocking DP_POLL operation.
select_interrupter interrupter_;
// The queues of read, write and except operations.
reactor_op_queue<socket_type> op_queue_[max_ops];
// The timer queues.
timer_queue_set timer_queues_;
// Whether the service has been shut down.
bool shutdown_;
};
} // namespace detail
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#include <boost/asio/detail/impl/dev_poll_reactor.hpp>
#if defined(BOOST_ASIO_HEADER_ONLY)
# include <boost/asio/detail/impl/dev_poll_reactor.ipp>
#endif // defined(BOOST_ASIO_HEADER_ONLY)
#endif // defined(BOOST_ASIO_HAS_DEV_POLL)
#endif // BOOST_ASIO_DETAIL_DEV_POLL_REACTOR_HPP<|fim▁end|> |
namespace boost {
namespace asio {
namespace detail { |
<|file_name|>FragmentConnection.java<|end_file_name|><|fim▁begin|>package com.app.labeli;
import com.app.labeli.member.Member;
import net.tools.*;
import android.app.ProgressDialog;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentTransaction;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Toast;
/**
* > @FragmentConnection
*
* Fragment to create a connection between app
* and API.
*
* @author Florian "Aamu Lumi" Kauder
* for the project @Label[i]
*/
public class FragmentConnection extends Fragment {
private EditText editTextLogin, editTextPassword;
private Button button;
private ProgressDialog pDialog;
public FragmentConnection() {
}
public void connectToAPI(View v){
if (editTextLogin.length() == 0)
Toast.makeText(getActivity(), "Veuillez rentrer un identifiant", Toast.LENGTH_SHORT).show();
else if (editTextPassword.length() == 0)
Toast.makeText(getActivity(), "Veuillez rentrer un mot de passe", Toast.LENGTH_SHORT).show();
else {
new InitConnection(editTextLogin.getText().toString(), editTextPassword.getText().toString())
.execute();
}
}
@Override
public void onCreate(Bundle savedInstanceState){
super.onCreate(savedInstanceState);
getActivity().getActionBar().setTitle("Connexion");
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// Inflate the layout for this fragment
View v = inflater.inflate(R.layout.fragment_connection, container,
false);
<|fim▁hole|> button = (Button) v.findViewById(R.id.fragment_connection_button_connection);
button.setOnClickListener(new OnClickListener(){
@Override
public void onClick(View arg0) {
connectToAPI(arg0);
}
});
return v;
}
private class InitConnection extends AsyncTask<Void, Void, String>
{
String username, password;
boolean success;
public InitConnection(String username, String password){
this.username = username;
this.password = password;
this.success = false;
}
@Override
protected void onPreExecute() {
super.onPreExecute();
pDialog = new ProgressDialog(FragmentConnection.this.getActivity());
pDialog.setMessage("Connexion");
pDialog.setIndeterminate(false);
pDialog.setCancelable(false);
pDialog.show();
}
@Override
protected String doInBackground(Void... params)
{
success = APIConnection.login(username, password);
return null;
}
@Override
protected void onPostExecute(String file_url) {
pDialog.dismiss();
if (!success)
Toast.makeText(getActivity(), "Mauvais identifiant / mot de passe", Toast.LENGTH_SHORT).show();
else {
Member loggedUser = APIConnection.getLoggedUser();
Toast.makeText(getActivity(), "Bonjour " + loggedUser.getFirstName() + " " + loggedUser.getLastName(), Toast.LENGTH_SHORT).show();
((MainActivity)getActivity()).loadLeftMenu();
Fragment fragment = new FragmentAccount();
Bundle args = new Bundle();
fragment.setArguments(args);
FragmentTransaction transaction = getActivity().getSupportFragmentManager().beginTransaction();
transaction.setTransition(FragmentTransaction.TRANSIT_FRAGMENT_FADE);
transaction.replace(R.id.activity_main_content_frame, fragment, fragment.getClass().getName());
transaction.addToBackStack(fragment.getClass().getName());
transaction.commit();
}
}
}
}<|fim▁end|> |
editTextLogin = (EditText) v.findViewById(R.id.fragment_connection_edit_text_login);
editTextPassword = (EditText) v.findViewById(R.id.fragment_connection_edit_text_password);
|
<|file_name|>translate-contributors.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script translates invalid authors in the contributors list generated
# by generate-contributors.py. When the script encounters an author name that
# is considered invalid, it searches Github and JIRA in an attempt to find
# suitable replacements. This tool runs in two modes:
#
# (1) Interactive mode: For each invalid author name, this script presents
# all candidate replacements to the user and awaits user response. In this
# mode, the user may also input a custom name. This is the default.
#
# (2) Non-interactive mode: For each invalid author name, this script replaces
# the name with the first valid candidate it can find. If there is none, it
# uses the original name. This can be enabled through the --non-interactive flag.
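#
# For illustration, a typical invocation might look like the following; the
# credential values are placeholders, not real settings:
#
#   JIRA_USERNAME=someuser JIRA_PASSWORD=... GITHUB_API_TOKEN=... \
#       ./translate-contributors.py --non-interactive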
import os
import sys
from releaseutils import *
# You must set the following before use!
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", None)
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", None)
GITHUB_API_TOKEN = os.environ.get("GITHUB_API_TOKEN", None)
if not JIRA_USERNAME or not JIRA_PASSWORD:
sys.exit("Both JIRA_USERNAME and JIRA_PASSWORD must be set")
if not GITHUB_API_TOKEN:
sys.exit("GITHUB_API_TOKEN must be set")
# Write new contributors list to <old_file_name>.final
if not os.path.isfile(contributors_file_name):
print("Contributors file %s does not exist!" % contributors_file_name)
print("Have you run ./generate-contributors.py yet?")
sys.exit(1)
contributors_file = open(contributors_file_name, "r")
warnings = []
# In non-interactive mode, this script will choose the first replacement that is valid
INTERACTIVE_MODE = True
if len(sys.argv) > 1:
options = set(sys.argv[1:])
if "--non-interactive" in options:
INTERACTIVE_MODE = False
if INTERACTIVE_MODE:
print("Running in interactive mode. To disable this, provide the --non-interactive flag.")
# Setup Github and JIRA clients
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options, basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
github_client = Github(GITHUB_API_TOKEN)
# Load known author translations that are cached locally
known_translations = {}
known_translations_file_name = "known_translations"
known_translations_file = open(known_translations_file_name, "r")
for line in known_translations_file:
if line.startswith("#"):
continue
[old_name, new_name] = line.strip("\n").split(" - ")
known_translations[old_name] = new_name
known_translations_file.close()
# Open again in case the user adds new mappings
known_translations_file = open(known_translations_file_name, "a")
# Generate candidates for the given author. This should only be called if the given author
# name does not represent a full name as this operation is somewhat expensive. Under the
# hood, it makes several calls to the Github and JIRA API servers to find the candidates.
#
# This returns a list of (candidate name, source) 2-tuples. E.g.
# [
# (NOT_FOUND, "No full name found for Github user andrewor14"),
# ("Andrew Or", "Full name of JIRA user andrewor14"),
# ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"),
# ("Andrew Ordall", "Full name of SPARK-1663 assignee andrewor14"),
# (NOT_FOUND, "No assignee found for SPARK-1763")
# ]
NOT_FOUND = "Not found"
def generate_candidates(author, issues):
candidates = []
# First check for full name of Github user
github_name = get_github_name(author, github_client)
if github_name:
candidates.append((github_name, "Full name of Github user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for Github user %s" % author))
# Then do the same for JIRA user
jira_name = get_jira_name(author, jira_client)
if jira_name:
candidates.append((jira_name, "Full name of JIRA user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for JIRA user %s" % author))
# Then do the same for the assignee of each of the associated JIRAs
# Note that a given issue may not have an assignee, or the assignee may not have a full name
for issue in issues:
try:
jira_issue = jira_client.issue(issue)
except JIRAError as e:
# Do not exit just because an issue is not found!
if e.status_code == 404:
warnings.append("Issue %s not found!" % issue)
continue
raise e
jira_assignee = jira_issue.fields.assignee
if jira_assignee:
user_name = jira_assignee.name
display_name = jira_assignee.displayName
if display_name:
candidates.append(
(display_name, "Full name of %s assignee %s" % (issue, user_name)))
else:
candidates.append(
(NOT_FOUND, "No full name found for %s assignee %s" % (issue, user_name)))
else:
candidates.append((NOT_FOUND, "No assignee found for %s" % issue))
# Guard against special characters in candidate names
# Note that the candidate name may already be in unicode (JIRA returns this)
for i, (candidate, source) in enumerate(candidates):
        # In Python 3 strings are already unicode; only raw bytes need decoding
        if isinstance(candidate, bytes):
            candidate = candidate.decode("utf-8")
candidate = unidecode.unidecode(candidate).strip()
candidates[i] = (candidate, source)
return candidates
# Translate each invalid author by searching for possible candidates from Github and JIRA
# In interactive mode, this script presents the user with a list of choices and has the user
# select from this list. Additionally, the user may also choose to enter a custom name.
# In non-interactive mode, this script picks the first valid author name from the candidates
# If no such name exists, the original name is used (without the JIRA numbers).
print("\n========================== Translating contributor list ==========================")
lines = contributors_file.readlines()
contributions = []
for i, line in enumerate(lines):
# It is possible that a line in the contributor file only has the github name, e.g. yhuai.
# So, we need a strip() to remove the newline.
temp_author = line.strip(" * ").split(" -- ")[0].strip()
print("Processing author %s (%d/%d)" % (temp_author, i + 1, len(lines)))
if not temp_author:
error_msg = " ERROR: Expected the following format \" * <author> -- <contributions>\"\n"
error_msg += " ERROR: Actual = %s" % line
print(error_msg)
warnings.append(error_msg)
contributions.append(line)
continue
author = temp_author.split("/")[0]
# Use the local copy of known translations where possible
if author in known_translations:
line = line.replace(temp_author, known_translations[author])
elif not is_valid_author(author):
new_author = author
issues = temp_author.split("/")[1:]
candidates = generate_candidates(author, issues)
# Print out potential replacement candidates along with the sources, e.g.
# [X] No full name found for Github user andrewor14
# [X] No assignee found for SPARK-1763
# [0] Andrew Or - Full name of JIRA user andrewor14
# [1] Andrew Orso - Full name of SPARK-1444 assignee andrewor14
# [2] Andrew Ordall - Full name of SPARK-1663 assignee andrewor14
# [3] andrewor14 - Raw Github username
# [4] Custom
candidate_names = []
bad_prompts = [] # Prompts that can't actually be selected; print these first.
good_prompts = [] # Prompts that contain valid choices
for candidate, source in candidates:
if candidate == NOT_FOUND:
bad_prompts.append(" [X] %s" % source)
else:
index = len(candidate_names)
candidate_names.append(candidate)
good_prompts.append(" [%d] %s - %s" % (index, candidate, source))
raw_index = len(candidate_names)
custom_index = len(candidate_names) + 1
for p in bad_prompts:
print(p)
if bad_prompts:
print(" ---")
for p in good_prompts:
print(p)
# In interactive mode, additionally provide "custom" option and await user response
if INTERACTIVE_MODE:
print(" [%d] %s - Raw Github username" % (raw_index, author))
print(" [%d] Custom" % custom_index)
            response = input("    Your choice: ")
last_index = custom_index
while not response.isdigit() or int(response) > last_index:
                response = input("    Please enter an integer between 0 and %d: " % last_index)
response = int(response)
if response == custom_index:
                new_author = input("    Please type a custom name for this author: ")
elif response != raw_index:
new_author = candidate_names[response]
# In non-interactive mode, just pick the first candidate
else:
valid_candidate_names = [name for name, _ in candidates
if is_valid_author(name) and name != NOT_FOUND]
if valid_candidate_names:
new_author = valid_candidate_names[0]
# Finally, capitalize the author and replace the original one with it
# If the final replacement is still invalid, log a warning
if is_valid_author(new_author):
new_author = capitalize_author(new_author)
else:
warnings.append(
"Unable to find a valid name %s for author %s" % (author, temp_author))
print(" * Replacing %s with %s" % (author, new_author))
# If we are in interactive mode, prompt the user whether we want to remember this new
# mapping
if INTERACTIVE_MODE and \
author not in known_translations and \
yesOrNoPrompt(
" Add mapping %s -> %s to known translations file?" % (author, new_author)):
known_translations_file.write("%s - %s\n" % (author, new_author))
known_translations_file.flush()
line = line.replace(temp_author, author)
contributions.append(line)
print("==================================================================================\n")
contributors_file.close()
known_translations_file.close()
# Sort the contributions before writing them to the new file.
# Additionally, check if there are any duplicate author rows.
# This could happen if the same user has both a valid full
# name (e.g. Andrew Or) and an invalid one (andrewor14).
# If so, warn the user about this at the end.
contributions.sort()
all_authors = set()
new_contributors_file_name = contributors_file_name + ".final"
new_contributors_file = open(new_contributors_file_name, "w")
for line in contributions:
author = line.strip(" * ").split(" -- ")[0]
if author in all_authors:
warnings.append("Detected duplicate author name %s. Please merge these manually." % author)
all_authors.add(author)
new_contributors_file.write(line)
new_contributors_file.close()
print("Translated contributors list successfully written to %s!" % new_contributors_file_name)<|fim▁hole|>if warnings:
print("\n========== Warnings encountered while translating the contributor list ===========")
for w in warnings:
print(w)
print("Please manually correct these in the final contributors list at %s." %
new_contributors_file_name)
print("==================================================================================\n")<|fim▁end|> |
# Log any warnings encountered in the process |
<|file_name|>bench_plot_parallel_pairwise.py<|end_file_name|><|fim▁begin|># Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()<|fim▁hole|> start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure("scikit-learn parallel %s benchmark results" % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel("n_samples")
plt.ylabel("Time (s)")
plt.title("Parallel %s" % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()<|fim▁end|> | func(X, n_jobs=1)
one_core.append(time.time() - start)
|
<|file_name|>bill_wizard.js<|end_file_name|><|fim▁begin|>function $(id) { return document.getElementById(id); }
function retrieve_mbts() {
g.network.simple_request('FM_MBTS_RETRIEVE.authoritative',[ses(),g.mbts_id],
function(req) {
try {
g.mbts = req.getResultObject();
$('mbts_id').value = g.mbts.id();
$('mbts_xact_type').value = g.mbts.xact_type();
$('mbts_xact_start').value = util.date.formatted_date( g.mbts.xact_start(), '%{localized}' );
$('mbts_xact_finish').value = g.mbts.xact_finish() ? util.date.formatted_date( g.mbts.xact_finish(), '%{localized}' ) : '';
$('mbts_total_owed').value = g.mbts.total_owed() ? util.money.sanitize( g.mbts.total_owed() ) : '';
$('mbts_total_paid').value = g.mbts.total_paid() ? util.money.sanitize( g.mbts.total_paid() ) : '';
$('mbts_balance_owed').value = g.mbts.balance_owed() ? util.money.sanitize( g.mbts.balance_owed() ) : '';
$('xact_type').value = g.mbts.xact_type(); $('xact_type').disabled = true;
} catch(E) {
g.error.sdump('D_ERROR',E);
}
}
);
}
function retrieve_circ() {
JSAN.use('util.widgets');
function render_circ(r_circ) {
$('title_label').hidden = false;
$('checked_out_label').hidden = false;
$('due_label').hidden = false;
$('checked_in_label').hidden = false;
$('checked_out').value = r_circ.xact_start() ? util.date.formatted_date( r_circ.xact_start(), '%{localized}' ) : '';
$('checked_in').value = r_circ.checkin_time() ? util.date.formatted_date( r_circ.checkin_time(), '%{localized}' ) : '';
$('due').value = r_circ.due_date() ? util.date.formatted_date( r_circ.due_date(), '%{localized}' ) : '';
g.network.simple_request(
'MODS_SLIM_RECORD_RETRIEVE_VIA_COPY.authoritative',
[ typeof r_circ.target_copy() == 'object' ? r_circ.target_copy().id() : r_circ.target_copy() ],
function (rreq) {
var r_mvr = rreq.getResultObject();
if (instanceOf(r_mvr,mvr)) {
util.widgets.remove_children('title');
$('title').appendChild( document.createTextNode( r_mvr.title() ) );
} else {
g.network.simple_request(
'FM_ACP_RETRIEVE',
[ typeof r_circ.target_copy() == 'object' ? r_circ.target_copy().id() : r_circ.target_copy() ],
function (rrreq) {
var r_acp = rrreq.getResultObject();
if (instanceOf(r_acp,acp)) {
util.widgets.remove_children('title');
$('title').appendChild( document.createTextNode( r_acp.dummy_title() ) );
}
}
);
}
}
);
}
if (g.circ) {
render_circ(g.circ);
} else {
g.network.simple_request('FM_CIRC_RETRIEVE_VIA_ID', [ ses(), g.mbts_id ],
function (req) {
var r_circ = req.getResultObject();
if (instanceOf(r_circ,circ)) {
render_circ(r_circ);
}
}
);
}
}
function retrieve_patron() {
JSAN.use('patron.util');
g.patron_id = xul_param('patron_id');
g.au_obj = xul_param('patron');
if (! g.au_obj) {
g.au_obj = patron.util.retrieve_fleshed_au_via_id( ses(), g.patron_id );
}
if (g.au_obj) {
$('patron_name').setAttribute('value',
patron.util.format_name( g.au_obj ) + ' : ' + g.au_obj.card().barcode()
);
}
}
function patron_bill_init() {
try {
if (typeof JSAN == 'undefined') { throw( $("commonStrings").getString('common.jsan.missing') ); }
JSAN.errorLevel = "die"; // none, warn, or die
JSAN.addRepository('/xul/server/');
JSAN.use('util.error'); g.error = new util.error();
g.error.sdump('D_TRACE','my_init() for patron_display.xul');
g.OpenILS = {}; JSAN.use('OpenILS.data'); g.OpenILS.data = new OpenILS.data();
g.OpenILS.data.init({'via':'stash'});
JSAN.use('util.network'); g.network = new util.network();
JSAN.use('util.date');
JSAN.use('util.money');
JSAN.use('util.widgets');
JSAN.use('util.functional');
var override_default_billing_type = xul_param('override_default_billing_type');
var billing_list = util.functional.filter_list( g.OpenILS.data.list.cbt, function (x) { return x.id() >= 100 || x.id() == override_default_billing_type } );
var ml = util.widgets.make_menulist(
util.functional.map_list(
billing_list.sort( function(a,b) { if (a.name()>b.name()) return 1; if (a.name()<b.name()) return -1; return 0; } ), //g.OpenILS.data.list.billing_type.sort(),
function(obj) { return [ obj.name(), obj.id() ]; } //function(obj) { return [ obj, obj ]; }
),
override_default_billing_type || billing_list.sort( function(a,b) { if (a.name()>b.name()) return 1; if (a.name()<b.name()) return -1; return 0; } )[0].id()
);
ml.setAttribute('id','billing_type');
document.getElementById('menu_placeholder').appendChild(ml);
window.bill_wizard_event_listeners = new EventListenerList();
window.bill_wizard_event_listeners.add(ml,
'command',
function() {
if ( g.OpenILS.data.hash.cbt[ ml.value ] ) {
$('bill_amount').value = g.OpenILS.data.hash.cbt[ ml.value ].default_price();
}
},
false
);
retrieve_patron();
$('wizard_billing_location').setAttribute('value', g.OpenILS.data.hash.aou[ g.OpenILS.data.list.au[0].ws_ou() ].name() );
if ( g.OpenILS.data.hash.cbt[ ml.value ] ) {
$('bill_amount').value = g.OpenILS.data.hash.cbt[ ml.value ].default_price();
}
var override_default_price = xul_param('override_default_price');
if (override_default_price) {
$('bill_amount').value = override_default_price;
}
$('bill_amount').select(); $('bill_amount').focus();
g.circ = xul_param('circ');
if (xul_param('xact_id')) {
g.mbts_id = xul_param('xact_id');
$('summary').hidden = false;
retrieve_mbts();
retrieve_circ();
}
} catch(E) {
var err_msg = $("commonStrings").getFormattedString('common.exception', ['patron/bill_wizard.xul', E]);
try { g.error.sdump('D_ERROR',err_msg); } catch(E) { dump(err_msg); }
alert(err_msg);
}
}
function patron_bill_cleanup() {
try {
window.bill_wizard_event_listeners.removeAll();
} catch(E) {
var err_msg = $("commonStrings").getFormattedString('common.exception', ['patron/bill_wizard.xul', E]);
try { g.error.sdump('D_ERROR',err_msg); } catch(E) { dump(err_msg); }
alert(err_msg);
}
}
function patron_bill_finish() {
try {
var do_not_process_bill = xul_param('do_not_process_bill');
var xact_id = xul_param('xact_id');
if (do_not_process_bill) {
xulG.proceed = true;
xulG.cbt_id = $('billing_type').value;
xulG.amount = $('bill_amount').value;
xulG.note = $('bill_note').value;
} else {
if (!xact_id) {
var grocery = new mg();
grocery.isnew('1');
grocery.billing_location( g.OpenILS.data.list.au[0].ws_ou() );
grocery.usr( g.au_obj.id() );
grocery.note( $('bill_note').value );
xact_id = g.network.request(
api.FM_MG_CREATE.app,
api.FM_MG_CREATE.method,
[ ses(), grocery ]
);
}
if (typeof xact_id.ilsevent == 'undefined') {
JSAN.use('util.money');
var billing = new mb();
billing.isnew('1');
billing.note( $('bill_note').value );
billing.xact( xact_id );
billing.amount( util.money.sanitize( $('bill_amount').value ) );
billing.btype( $('billing_type').value );
billing.billing_type( g.OpenILS.data.hash.cbt[$('billing_type').value].name() );<|fim▁hole|> );
if (typeof mb_id.ilsevent != 'undefined') throw(mb_id);
//alert($('patronStrings').getString('staff.patron.bill_wizard.patron_bill_finish.billing_added'));
xulG.mb_id = mb_id;
xulG.xact_id = xact_id;
} else {
throw(xact_id);
}
}
} catch(E) {
g.error.standard_unexpected_error_alert('bill_wizard',E);
}
}<|fim▁end|> | var mb_id = g.network.request(
api.FM_MB_CREATE.app,
api.FM_MB_CREATE.method,
[ ses(), billing ] |
<|file_name|>apps.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from django.apps import AppConfig<|fim▁hole|><|fim▁end|> |
class ScannerEngineConfig(AppConfig):
name = 'scanner_engine' |
<|file_name|>resizing_array_queue.rs<|end_file_name|><|fim▁begin|>use std::iter;
use std::fmt;
use super::{QueueOfStrings, Queue};
const INITIAL_QUEUE_CAPACITY: usize = 2;
pub struct ResizingArrayQueueOfStrings {
q: Vec<Option<String>>,
head: usize,
tail: usize
}
impl ResizingArrayQueueOfStrings {
pub fn with_capacity(capacity: usize) -> ResizingArrayQueueOfStrings {
let storage = iter::repeat(None).take(capacity).collect();
ResizingArrayQueueOfStrings {
q: storage,
head: 0,
tail: 0
}
}
fn resize(&mut self, capacity: usize) {
let cap = self.q.len();
let mut new_storage: Vec<Option<String>> = iter::repeat(None).take(capacity).collect();
let tail = if self.tail > self.head {
self.tail
} else {
self.tail + cap
};
for i in self.head .. tail{
new_storage[i] = self.q[i % cap].take();
}
self.q = new_storage;
// self.head = self.head
self.tail = tail
}
}
impl QueueOfStrings for ResizingArrayQueueOfStrings {
fn new() -> ResizingArrayQueueOfStrings {
let storage = iter::repeat(None).take(INITIAL_QUEUE_CAPACITY).collect();
ResizingArrayQueueOfStrings {
q: storage,
head: 0,
tail: 0
}
}
fn is_empty(&self) -> bool {
self.head == self.tail
}
fn enqueue(&mut self, item: String) {
let mut cap = self.q.len();
self.q[self.tail % cap] = Some(item);
self.tail = (self.tail + 1) % cap;
if self.q[self.tail % cap].is_some() {
cap = 2 * cap;
self.resize(cap);
}
}
fn dequeue(&mut self) -> String {
let cap = self.q.len();
let item = self.q[self.head % cap].take();
self.head = (self.head + 1) % cap;
item.unwrap()
}
fn size(&self) -> usize {
let cap = self.q.len();
let tail = if self.tail > self.head {
self.tail
} else {
self.tail + cap
};
tail - self.head
}
}
// generic ResizingArrayQueue
pub struct ResizingArrayQueue<T> {
q: Vec<Option<T>>,
head: usize,
tail: usize
}
impl<T: Clone> Clone for ResizingArrayQueue<T> {
fn clone(&self) -> Self {
ResizingArrayQueue {
q: self.q.clone(),
head: self.head,
tail: self.tail
}
}
}
impl<T> ResizingArrayQueue<T> {
pub fn with_capacity(capacity: usize) -> ResizingArrayQueue<T> {
let mut storage = Vec::with_capacity(capacity);
for _ in 0 .. capacity {
storage.push(None);
}
ResizingArrayQueue {
q: storage,
head: 0,
tail: 0
}
}
fn resize(&mut self, capacity: usize) {
let cap = self.q.len();
let mut new_storage: Vec<Option<T>> = Vec::with_capacity(capacity);
let tail = if self.tail > self.head {
self.tail
} else {
self.tail + cap
};
for i in 0 .. capacity {
if i >= self.head && i < tail {
new_storage.push(self.q[i % cap].take());
} else {
new_storage.push(None);
}
}
self.q = new_storage;
self.tail = tail
}
}
impl<T> Queue<T> for ResizingArrayQueue<T> {
fn new() -> ResizingArrayQueue<T> {
ResizingArrayQueue::with_capacity(INITIAL_QUEUE_CAPACITY)
}
#[inline]
fn is_empty(&self) -> bool {
self.head == self.tail
}
fn enqueue(&mut self, item: T) {
let mut cap = self.q.len();
self.q[self.tail % cap] = Some(item);
self.tail = (self.tail + 1) % cap;
// if resize before enqueue, is_empty() will fail.
if self.q[self.tail % cap].is_some() {
cap = 2 * cap;
self.resize(cap);
}
}
fn dequeue(&mut self) -> Option<T> {
if self.is_empty() {
return None;
}
let cap = self.q.len();
let item = self.q[self.head % cap].take();
self.head = (self.head + 1) % cap;
item
}
fn size(&self) -> usize {
let cap = self.q.len();
let tail = if self.tail > self.head {<|fim▁hole|> };
tail - self.head
}
}
impl<T: fmt::Debug> fmt::Debug for ResizingArrayQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.head < self.tail {
for item in self.q[self.head .. self.tail].iter() {
try!(write!(f, "{:?}, ", item.as_ref().unwrap()));
}
} else {
for item in self.q[self.head ..].iter() {
try!(write!(f, "{:?}, ", item));
}
for item in self.q[.. self.tail].iter() {
try!(write!(f, "{:?}, ", item));
}
}
Ok(())
}
}
pub struct IntoIter<T> {
queue: ResizingArrayQueue<T>
}
impl<T> Iterator for IntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.queue.dequeue()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.queue.size(), Some(self.queue.size()))
}
}
impl<T> IntoIterator for ResizingArrayQueue<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter(self) -> IntoIter<T> {
IntoIter { queue: self }
}
}
#[test]
fn test_resizing_array_queue_of_strings() {
let mut queue: ResizingArrayQueueOfStrings = QueueOfStrings::new();
assert!(queue.is_empty());
let mut result = "to be or not to be".split(' ');
for s in "to be or not to - be - - that - - - is".split(' ') {
if s == "-" {
assert_eq!(&queue.dequeue(), result.next().unwrap())
} else {
queue.enqueue(s.into())
}
}
assert!(!queue.is_empty());
assert_eq!(2, queue.size());
}
#[test]
fn test_resizing_array_queue() {
let mut queue: ResizingArrayQueue<String> = Queue::new();
assert!(queue.is_empty());
let mut result = "to be or not to be".split(' ');
for s in "to be or not to - be - - that - - - is".split(' ') {
if s == "-" {
assert_eq!(queue.dequeue(), result.next().map(|s| s.into()))
} else {
queue.enqueue(s.into())
}
}
assert!(!queue.is_empty());
assert_eq!(2, queue.size());
}<|fim▁end|> | self.tail
} else {
self.tail + cap |
<|file_name|>nolinux.rs<|end_file_name|><|fim▁begin|>use super::TimerImpl;
use ffi::SystemError;
use reactor::Reactor;
use std::cmp;
use std::sync::atomic::{AtomicUsize, Ordering};
pub struct TimerCtl {<|fim▁hole|>}
impl TimerCtl {
pub fn new() -> Result<Self, SystemError> {
Ok(TimerCtl { timeout_nsec: AtomicUsize::new(0) })
}
pub fn startup(&self, _: &Reactor) {}
pub fn cleanup(&self, _: &Reactor) {}
pub fn wait_duration(&self, max: usize) -> usize {
cmp::min(self.timeout_nsec.load(Ordering::Relaxed), max)
}
pub fn reset_timeout(&self, timer: &TimerImpl) {
self.timeout_nsec.store(
timer.expiry.left(),
Ordering::SeqCst,
);
timer.ctx.as_reactor().interrupt();
}
}<|fim▁end|> | timeout_nsec: AtomicUsize, |
<|file_name|>eina.rs<|end_file_name|><|fim▁begin|>// Eina Rust bindings for EFL.
// Copyright (C) 2014 Luis Araujo <[email protected]>
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
extern crate libc;
extern crate core;
use std::ptr;
use std::mem::transmute;
use std::option::Option;
use eina::core::mem::uninitialized;
use eina::libc::{c_void, c_int, c_uint};
use eseful;
pub type EinaBool = u8;
pub static EINA_FALSE: EinaBool = 0u8;
pub static EINA_TRUE: EinaBool = 1u8;
type _EinaMagic = uint;
type _CEinaMagic = c_uint;
/*
* EinaList object.
*/
/// EinaList object.
pub struct EinaList<'r, T> {
_eo: *mut _EinaList<'r, T>
}
/// Representation of an Eina_List.
pub struct _EinaList<'r, T> {
data: &'r T,
next: *mut _EinaList<'r, T>,
prev: *mut _EinaList<'r, T>,
accounting: *_EinaListAccounting<'r, T>,
__magic: _EinaMagic
}
pub struct _EinaListAccounting<'r, T> {
last: *mut _EinaList<'r, T>,
count: uint,
__magic: _EinaMagic
}
/// C representation of an Eina_List.
pub struct _CEinaList {
data: *c_void,
next: *_CEinaList,
prev: *_CEinaList,
accounting: *_CEinaListAccounting,
__magic: _CEinaMagic
}
pub struct _CEinaListAccounting {
last: *_CEinaList,
count: c_uint,
__magic: _CEinaMagic
}
/*
* Inlined list type (EinaInlist).
*/
/// Inlined list type.
pub struct EinaInlist {
_eo: *_EinaInlist
}
pub struct _EinaInlist {
/// Next node
next: *_EinaInlist,
/// Previous node
prev: *_EinaInlist,
/// Last node
last: *_EinaInlist
}
/*
* EinaHash type.
*/
/// Type for a generic hash table.
pub struct _EinaHash<T> {
key_length_cb: EinaKeyLength<T>,
key_cmp_cb: EinaKeyCmp<T>,
key_hash_cb: EinaKeyHash<T>,
data_free_cb: EinaFreeCb<T>,
buckets: **EinaRbtree,
size: int,
mask: int,
population: int,
buckets_power_size: int,
__magic: _EinaMagic
}
pub struct _CEinaHash {
key_length_cb: _CEinaKeyLength,
key_cmp_cb: _CEinaKeyCmp,
key_hash_cb: _CEinaKeyHash,
data_free_cb: _CEinaFreeCb,
buckets: **EinaRbtree,
size: c_int,
mask: c_int,
population: c_int,
buckets_power_size: c_int,
__magic: _CEinaMagic
}
/// Type for a function to determine the length of a hash key.
pub type EinaKeyLength<T> = fn (&T) -> int;
type _CEinaKeyLength = fn (*c_void) -> c_int;
/// Type for a function to compare two hash keys.
pub type EinaKeyCmp<T> = fn (&T, int, &T, int) -> c_int;
type _CEinaKeyCmp = fn (*c_void, c_int, *c_void, c_int) -> c_int;
/// Type for a function to create a hash key.
pub type EinaKeyHash<T> = fn (&T, int) -> int;
type _CEinaKeyHash = fn (*c_void, c_int) -> c_int;
/// A callback type used to free data when iterating over a container.
pub type EinaFreeCb<T> = fn (&T);
type _CEinaFreeCb = fn (*c_void);
/// Type for a Red-Black tree node. It should be inlined into user's type.
pub struct EinaRbtree {
son: *[EinaRbtree, ..2],
color: uint
}
#[link(name = "eina")]
extern "C" {
fn eina_init() -> c_int;
fn eina_shutdown() -> c_int;
fn eina_list_free(list: *_CEinaList) -> *_CEinaList;
fn eina_list_append(list: *_CEinaList, data: *c_void) -> *_CEinaList;
fn eina_list_prepend(list: *_CEinaList, data: *c_void) -> *_CEinaList;
/* Inline list type */
fn eina_inlist_append(in_list: *_EinaInlist, in_item: *_EinaInlist) -> *_EinaInlist;
fn eina_inlist_prepend(in_list: *_EinaInlist, in_item: *_EinaInlist) -> *_EinaInlist;
fn eina_inlist_promote(list: *_EinaInlist, item: *_EinaInlist) -> *_EinaInlist;
fn eina_inlist_demote(list: *_EinaInlist, item: *_EinaInlist) -> *_EinaInlist;
fn eina_inlist_remove(in_list: *_EinaInlist, in_item: *_EinaInlist) -> *_EinaInlist;
/* Hash type */
fn eina_hash_stringshared_new(data_free_cb: _CEinaFreeCb) -> *_CEinaHash;
fn eina_hash_string_superfast_new(data_free_cb: _CEinaFreeCb) -> *_CEinaHash;
fn eina_hash_add(hash: *_CEinaHash, key: *c_void, data: *c_void) -> EinaBool;
fn eina_hash_find(hash: *_CEinaHash, key: *c_void) -> *c_void;
fn eina_hash_population(hash: *_CEinaHash) -> c_int;
fn eina_hash_free(hash: *_CEinaHash);
}
/* Implementations for EinaList type */
impl<'r, T> EinaList<'r, T> {
/// Create high level EinaList object.
pub fn new(el: *mut _EinaList<'r, T>) -> EinaList<'r, T> {
EinaList { _eo: el }
}
}
/// EinaList implements the Iterator trait.
impl<'r, T> Iterator<&'r T> for EinaList<'r, T> {
fn next(&mut self) -> Option<&'r T> {
let v = list_data_get(self._eo);
*self = match list_next(self._eo) {
None => EinaList { _eo: ptr::mut_null() },
Some(l) => EinaList { _eo: l }
};
return v
}
}
/* Implementations for EinaInlist type */
impl EinaInlist {
pub fn new(el: *_EinaInlist) -> EinaInlist {
EinaInlist { _eo: el }
}
}
impl<'r, T> Iterator<&'r T> for EinaInlist {
fn next(&mut self) -> Option<&'r T> {
let v = if self._eo.is_null() {
None
} else {
let elm: &T = inlist_container_get(*self);
Some(elm)
};
// Get next value if current value is valid (Some).
if v.is_some() { self._eo = unsafe { (*self._eo).next } };
return v
}
}
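// Illustrative sketch of how the wrappers fit together: initialize Eina, build a
// list, then walk it through the Iterator impl above. The static value and its
// lifetime are assumptions made only for this example.
//
//     eina::init();
//     static V: int = 42;
//     let l = eina::list_append(None, &V);
//     for item in eina::EinaList::new(l) {
//         // *item is 42 here
//     }
//     eina::shutdown();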
/// Initialize the Eina library.
pub fn init() -> int { unsafe { eina_init() as int } }
/// Shut down the Eina library.
pub fn shutdown() -> int { unsafe { eina_shutdown() as int } }
/// Free an entire list and all the nodes, ignoring the data contained.
pub fn list_free<T>(list: *mut _EinaList<T>) -> *mut _EinaList<T> {
unsafe {
transmute::<*_CEinaList,*mut _EinaList<T>>(
eina_list_free(transmute::<*mut _EinaList<T>,*_CEinaList>(list)))
}
}
/// Append the given data to the given linked list.
/// This function appends data to list. If list is 'None', a new list is returned.
pub fn list_append<T>(list: Option<*mut _EinaList<T>>, data: &T) -> *mut _EinaList<T> {
unsafe {
let c_data: *c_void = transmute(data);
match list {
None => transmute::<*_CEinaList,*mut _EinaList<T>>(
eina_list_append(ptr::null(), c_data)),
Some(l) => transmute::<*_CEinaList,*mut _EinaList<T>>(
eina_list_append(transmute::<*mut _EinaList<T>,*_CEinaList>(l), c_data))
}
}
}
/// Prepends the given data to the given linked list.
/// This function prepends data to list. If list is 'None', a new list is returned.
pub fn list_prepend<T>(list: Option<*mut _EinaList<T>>, data: &T) -> *mut _EinaList<T> {
unsafe {
let c_data: *c_void = transmute(data);
match list {
None => transmute::<*_CEinaList,*mut _EinaList<T>>(
eina_list_prepend(ptr::null(), c_data)),
Some(l) => transmute::<*_CEinaList,*mut _EinaList<T>>(
eina_list_prepend(transmute::<*mut _EinaList<T>,*_CEinaList>(l), c_data))
}
}
}
/// Get the list node data member.
#[inline]
pub fn list_data_get<'r, T>(list: *mut _EinaList<'r, T>) -> Option<&'r T> {
if list.is_null() { return None }
unsafe { Some((*list).data) }
}
/// Set the list node data member.
#[inline]
pub fn list_data_set<'r, T>(list: *mut _EinaList<'r, T>, new_data: &'r T) -> Option<&'r T> {
if list.is_null() { return None }
unsafe {
let olddata = (*list).data;
(*list).data = new_data;
Some(olddata)
}
}
/// Get the last list node in the list.
#[inline]
pub fn list_last<'a, T>(list: *mut _EinaList<'a, T>) -> Option<*mut _EinaList<'a, T>> {
if list.is_null() { return None }
unsafe { Some((*(*list).accounting).last) }
}
/// Get the next list node after the specified list node.
#[inline]
pub fn list_next<'a, T>(list: *mut _EinaList<'a, T>) -> Option<*mut _EinaList<'a, T>> {
if list.is_null() { return None }
unsafe {
// Let's be nice and return None for nullable next
if (*list).next.is_null() { return None }
Some((*list).next)
}
}
/// Get the previous list node before the specified list node.
#[inline]
pub fn list_prev<'a, T>(list: *mut _EinaList<'a, T>) -> Option<*mut _EinaList<'a, T>> {
if list.is_null() { return None }
unsafe {
// Let's be nice and return None for nullable prev
if (*list).prev.is_null() { return None }
Some((*list).prev)
}
}
/// Get the count of the number of items in a list.
#[inline]
pub fn list_count<'r, T>(list: *mut _EinaList<'r, T>) -> uint {
if list.is_null() { return 0 }
unsafe {
(*(*list).accounting).count
}
}
/// Convenient function to get the last list node data member.
#[inline]
pub fn list_last_data_get<'r, T>(list: *mut _EinaList<'r, T>) -> Option<&'r T> {
match list_last(list) {
None => None,
Some(last) => list_data_get(last)<|fim▁hole|>/// Add a new node to end of a list.
pub fn inlist_append(in_list: Option<EinaInlist>, in_item: *_EinaInlist) -> EinaInlist {
EinaInlist {
_eo: unsafe {
match in_list {
None => eina_inlist_append(ptr::null(), in_item),
Some(lst) => eina_inlist_append(lst._eo, in_item)
}
}
}
}
/// Add a new node to beginning of list.
pub fn inlist_prepend(in_list: Option<EinaInlist>, in_item: *_EinaInlist) -> EinaInlist {
EinaInlist {
_eo: unsafe {
match in_list {
None => eina_inlist_prepend(ptr::null(), in_item),
Some(lst) => eina_inlist_prepend(lst._eo, in_item)
}
}
}
}
/// Move existing node to beginning of list.
pub fn inlist_promote(in_list: Option<EinaInlist>, in_item: *_EinaInlist) -> EinaInlist {
EinaInlist {
_eo: unsafe {
match in_list {
None => eina_inlist_promote(ptr::null(), in_item),
Some(lst) => eina_inlist_promote(lst._eo, in_item)
}
}
}
}
/// Move existing node to end of list.
pub fn inlist_demote(in_list: Option<EinaInlist>, in_item: *_EinaInlist) -> EinaInlist {
EinaInlist {
_eo: unsafe {
match in_list {
None => eina_inlist_demote(ptr::null(), in_item),
Some(lst) => eina_inlist_demote(lst._eo, in_item)
}
}
}
}
/// Remove node from list.
pub fn inlist_remove(in_list: EinaInlist, in_item: *_EinaInlist) -> EinaInlist {
EinaInlist {
_eo: unsafe { eina_inlist_remove(in_list._eo, in_item) }
}
}
/// Get the container object of an in_list.
pub fn inlist_container_get<T>(in_list: EinaInlist) -> &T {
unsafe { transmute(in_list._eo) }
}
/// Convenient function for object allocation.
#[inline]
pub fn object<T>() -> T {
unsafe { uninitialized::<T>() }
}
/// Macro to get the inlist object of a struct.
#[macro_export]
macro_rules! inlist_get(
($inlist:ident) => (unsafe {
use std::mem::transmute;
transmute(&($inlist.__in_list))
})
)
/* Hash type functions */
/// Create a new hash table optimized for stringshared values.
pub fn hash_stringshared_new<T>(data_free_cb: EinaFreeCb<T>) -> *mut _EinaHash<T> {
unsafe { transmute(eina_hash_stringshared_new(transmute(data_free_cb))) }
}
/// Create a new hash table for use with strings.
pub fn hash_string_superfast_new<T>(data_free_cb: EinaFreeCb<T>) -> *mut _EinaHash<T> {
unsafe { transmute(eina_hash_string_superfast_new(transmute(data_free_cb))) }
}
/// Add an entry to the given hash table.
pub fn hash_add<T>(hash: *mut _EinaHash<T>, key: &T, data: &T) -> bool {
eseful::from_eina_to_bool(unsafe {
eina_hash_add(transmute(hash), transmute(key), transmute(data))
})
}
/// Retrieve a specific entry in the given hash table.
pub fn hash_find<T>(hash: *mut _EinaHash<T>, key: &T) -> &T {
unsafe { transmute(eina_hash_find(transmute(hash), transmute(key))) }
}
/// Returns the number of entries in the given hash table.
pub fn hash_population<T>(hash: *mut _EinaHash<T>) -> int {
unsafe { eina_hash_population(transmute(hash)) as int }
}
/// Free the given hash table resources.
pub fn hash_free<T>(hash: *mut _EinaHash<T>) {
unsafe { eina_hash_free(transmute(hash)) }
}<|fim▁end|> | }
}
/* Inline list functions */ |
<|file_name|>OpenningState.java<|end_file_name|><|fim▁begin|>package com.cheng.zenofdesignpatterns.patterns.state.liftstate;
/**
 * What can be done while the lift door is open
*/
public class OpenningState extends LiftState {
    // An open door can of course be closed; this also exercises the door open/close cycle
@Override
public void close() {
        // Update the state
super.context.setLiftState(LiftContext.closeingState);
        // Delegate the action to CloseState
super.context.getLiftState().close();
}<|fim▁hole|> public void open() {
System.out.println("电梯门开启...");
}
    // Running with the door open? This lift would scare you to death!
@Override
public void run() {
// do nothing;
}
    // Still not stopping while the door is open?
public void stop() {
// do nothing;
}
}<|fim▁end|> |
// 打开电梯门
@Override |
<|file_name|>classic.py<|end_file_name|><|fim▁begin|>import sys
import time
import json
import logging
import random
import tornado.options
from tornado.options import define, options
from tornado import gen
define('srp_root',default='http://192.168.56.1')
#define('srp_root',default='https://remote-staging.utorrent.com')
#define('srp_root',default='https://remote.utorrent.com')
define('debug',default=True)
define('verbose',default=1, type=int)
tornado.options.parse_command_line()
if options.debug:
import pdb
import tornado.ioloop
from falcon_api.session import Session
from falcon_api.util import asyncsleep
from falcon_api.classic import Client
import tornado.httpclient
httpclient = tornado.httpclient.AsyncHTTPClient(force_instance=True, max_clients=1)
@gen.engine
def test_login():
username = sys.argv[1]
password = sys.argv[2]
# check result..
#torrent = 'http://www.clearbits.net/get/503-control-alt-deus---made-of-fire.torrent'
hash = ''.join([random.choice( list('abcdef') + map(str,range(10)) ) for _ in range(40)])
torrent = 'magnet:?xt=urn:btih:%s' % hash
for _ in range(1):
client = Client(username, password)
client.sync()
yield gen.Task( asyncsleep, 1 )
#client.add_url(torrent)
<|fim▁hole|> client.stop()
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
tasks.append( gen.Task( torrent.fetch_files ) )
tasks.append( gen.Task( torrent.fetch_metadata ) )
responses = yield gen.Multi( tasks )
logging.info('responses %s' % [r.code for r in responses])
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
for file in torrent.files:
link = file.webseed_link()
print link
request = tornado.httpclient.HTTPRequest(link,
validate_cert=False)
tasks.append( gen.Task( httpclient.fetch, request ) )
while tasks:
some_tasks = [tasks.pop() for _ in range(5)]
logging.info('executing tasks of len %s' % len(some_tasks))
responses = yield gen.Multi( some_tasks )
logging.info('responses %s' % [(r.code, len(r.body)) for r in responses])
if False:
tasks = []
for hash, torrent in client.torrents.items():
if torrent.get('progress') == 1000:
link = torrent.webseed_link()
print torrent.get('name'), torrent.get('progress'), link
request = tornado.httpclient.HTTPRequest(link,
validate_cert=False)
tasks.append( gen.Task( httpclient.fetch, request ) )
responses = yield gen.Multi( tasks )
logging.info('responses %s' % [r.code for r in responses])
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.instance()
test_login()
ioloop.start()<|fim▁end|> | |
<|file_name|>client.go<|end_file_name|><|fim▁begin|>/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.<|fim▁hole|>
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blb
import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/baiducloud/baiducloud-sdk-go/bce"
)
// Endpoint contains all endpoints of Baidu Cloud BLB.
var Endpoint = map[string]string{
"bj": "blb.bj.baidubce.com",
"gz": "blb.gz.baidubce.com",
"su": "blb.su.baidubce.com",
"hk": "blb.hkg.baidubce.com",
"bd": "blb.bd.baidubce.com",
}
// Client is the BLB client implementation for the Baidu Cloud BLB API.
type Client struct {
*bce.Client
}
// NewBLBClient creates a new client for BLB.
func NewBLBClient(config *bce.Config) *Client {
bceClient := bce.NewClient(config)
return &Client{bceClient}
}
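// Illustrative example (the bce.Config fields shown are assumed, not taken from
// this file):
//
//	cfg := &bce.Config{Region: "bj"}
//	client := NewBLBClient(cfg)
//	url := client.GetURL("v1/blb", nil)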
// GetURL generates the full URL of http request for Baidu Cloud BLB API.
func (c *Client) GetURL(version string, params map[string]string) string {
host := c.Endpoint
if host == "" {
host = Endpoint[c.GetRegion()]
}
uriPath := version
return c.Client.GetURL(host, uriPath, params)
}<|fim▁end|> | You may obtain a copy of the License at |
<|file_name|>0009_auto_20181105_2039.py<|end_file_name|><|fim▁begin|># Generated by Django 2.0.8 on 2018-11-05 20:39
from django.db import migrations
<|fim▁hole|> ]
operations = [
migrations.AlterModelOptions(
name='ticket',
options={'default_permissions': ('add', 'change', 'delete', 'view')},
),
]<|fim▁end|> | class Migration(migrations.Migration):
dependencies = [
('tickets', '0008_auto_20180730_2035'), |
<|file_name|>logo.js<|end_file_name|><|fim▁begin|><|fim▁hole|> evt.preventDefault();
Meteor.swiperV.slideTo(0);
}
});<|fim▁end|> | Template.botanikaLogo.events({
'click .botanika-logo' (evt) { |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md><|fim▁hole|><|fim▁end|> | extern crate build;
fn main() {
build::link("netsh", true)
} |
<|file_name|>model_control_one_enabled_Quantization_MovingMedian_Seasonal_DayOfWeek_LSTM.py<|end_file_name|><|fim▁begin|>import tests.model_control.test_ozone_custom_models_enabled as testmod
<|fim▁hole|>testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['Seasonal_DayOfWeek'] , ['LSTM'] );<|fim▁end|> | |
<|file_name|>helper9.js<|end_file_name|><|fim▁begin|><|fim▁hole|>if (typeof(console.log) == "undefined") { console.log = function() { return 0; }; };
String.prototype.capitalize = function() {
return this.charAt(0).toUpperCase() + this.slice(1);
};
if(typeof jQuery !== "undefined")
jQuery.support.placeholder = (function () {
var i = document.createElement('input');
return 'placeholder' in i;
})();<|fim▁end|> | if (typeof(console) == "undefined") { console = {}; } |
<|file_name|>GpxFromInstructions.java<|end_file_name|><|fim▁begin|>/*
* Licensed to GraphHopper GmbH under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* GraphHopper GmbH licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.graphhopper.util.gpx;
import com.graphhopper.util.*;
import com.graphhopper.util.shapes.GHPoint3D;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
// todo: the code here does not really belong in core, but we moved it here for now so it's available from
// map-matching resource (it cannot be in the api module, because it uses AngleCalc). Probably we should separate the
// actual gpx conversion (which belongs to the web module) from the angle calculations. Or at least move this code back
// into web-bundle once MapMatchingResource is in core. Or we need another module for code that is used in different
// modules like web, but does not really fit into core either.
public class GpxFromInstructions {
private static final AngleCalc AC = AngleCalc.ANGLE_CALC;
static String simpleXMLEscape(String str) {
// We could even use the 'more flexible' CDATA section but for now do the following. The 'and' could be important sometimes:
return str.replaceAll("&", "&amp;").
// but do not care for:
replaceAll("[\\<\\>]", "_");
}
public static List<GPXEntry> createGPXList(InstructionList instructions) {
List<GPXEntry> gpxList = new ArrayList<>();
long timeOffset = 0;
for (Instruction instruction : instructions) {
int i = 0;
for (GHPoint3D point : instruction.getPoints()) {
GPXEntry gpxEntry;
if (i == 0) {
gpxEntry = new GPXEntry(point, timeOffset);
} else {
// We don't have timestamps for pillar nodes
gpxEntry = new GPXEntry(point);
}
gpxList.add(gpxEntry);
i++;
}
timeOffset = timeOffset + instruction.getTime();
}
return gpxList;
}
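/** Appends a single <wpt> way point element for the first coordinate of the given instruction. */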
private static void createWayPointBlock(StringBuilder output, Instruction instruction, DecimalFormat decimalFormat, Translation tr) {
output.append("\n<wpt ");
output.append("lat=\"").append(decimalFormat.format(instruction.getPoints().getLatitude(0)));
output.append("\" lon=\"").append(decimalFormat.format(instruction.getPoints().getLongitude(0))).append("\">");
String name;
if (instruction.getName().isEmpty())
name = instruction.getTurnDescription(tr);
else
name = instruction.getName();
output.append(" <name>").append(simpleXMLEscape(name)).append("</name>");
output.append("</wpt>");
}
public static String createGPX(InstructionList instructions, String trackName, long startTimeMillis, boolean includeElevation, boolean withRoute, boolean withTrack, boolean withWayPoints, String version, Translation tr) {
DateFormat formatter = Helper.createFormatter();
DecimalFormat decimalFormat = new DecimalFormat("#", DecimalFormatSymbols.getInstance(Locale.ROOT));
decimalFormat.setMinimumFractionDigits(1);
decimalFormat.setMaximumFractionDigits(6);
decimalFormat.setMinimumIntegerDigits(1);
String header = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>"
+ "<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""
+ " creator=\"Graphhopper version " + version + "\" version=\"1.1\""
// This xmlns:gh acts only as ID, no valid URL necessary.
// Use a separate namespace for custom extensions to make basecamp happy.
+ " xmlns:gh=\"https://graphhopper.com/public/schema/gpx/1.1\">"
+ "\n<metadata>"
+ "<copyright author=\"OpenStreetMap contributors\"/>"
+ "<link href=\"http://graphhopper.com\">"
+ "<text>GraphHopper GPX</text>"
+ "</link>"
+ "<time>" + formatter.format(startTimeMillis) + "</time>"
+ "</metadata>";
StringBuilder gpxOutput = new StringBuilder(header);
if (!instructions.isEmpty()) {
if (withWayPoints) {
createWayPointBlock(gpxOutput, instructions.get(0), decimalFormat, tr); // Start
for (Instruction currInstr : instructions) {
if ((currInstr.getSign() == Instruction.REACHED_VIA) // Via
|| (currInstr.getSign() == Instruction.FINISH)) // End
{
createWayPointBlock(gpxOutput, currInstr, decimalFormat, tr);
}
}
}
if (withRoute) {
gpxOutput.append("\n<rte>");
Instruction nextInstr = null;
for (Instruction currInstr : instructions) {
if (null != nextInstr)
createRteptBlock(gpxOutput, nextInstr, currInstr, decimalFormat, tr);
nextInstr = currInstr;
}
createRteptBlock(gpxOutput, nextInstr, null, decimalFormat, tr);
gpxOutput.append("\n</rte>");
}
}
if (withTrack) {
gpxOutput.append("\n<trk><name>").append(trackName).append("</name>");
gpxOutput.append("<trkseg>");
for (GPXEntry entry : createGPXList(instructions)) {
gpxOutput.append("\n<trkpt lat=\"").append(decimalFormat.format(entry.getPoint().getLat()));
gpxOutput.append("\" lon=\"").append(decimalFormat.format(entry.getPoint().getLon())).append("\">");
if (includeElevation)
gpxOutput.append("<ele>").append(Helper.round2(((GHPoint3D) entry.getPoint()).getEle())).append("</ele>");
if (entry.getTime() != null)
gpxOutput.append("<time>").append(formatter.format(startTimeMillis + entry.getTime())).append("</time>");
gpxOutput.append("</trkpt>");
}
gpxOutput.append("\n</trkseg>");
gpxOutput.append("\n</trk>");
}
// we could now use 'wpt' for via points
gpxOutput.append("\n</gpx>");
return gpxOutput.toString();
}
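/** Appends one <rtept> route point element, including the gh: extension tags, for the given instruction. */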
private static void createRteptBlock(StringBuilder output, Instruction instruction, Instruction nextI, DecimalFormat decimalFormat, Translation tr) {
output.append("\n<rtept lat=\"").append(decimalFormat.format(instruction.getPoints().getLatitude(0))).
append("\" lon=\"").append(decimalFormat.format(instruction.getPoints().getLongitude(0))).append("\">");
if (!instruction.getName().isEmpty())
output.append("<desc>").append(simpleXMLEscape(instruction.getTurnDescription(tr))).append("</desc>");
output.append("<extensions>");
output.append("<gh:distance>").append(Helper.round(instruction.getDistance(), 1)).append("</gh:distance>");
output.append("<gh:time>").append(instruction.getTime()).append("</gh:time>");
String direction = calcDirection(instruction, nextI);
if (!direction.isEmpty())
output.append("<gh:direction>").append(direction).append("</gh:direction>");
double azimuth = calcAzimuth(instruction, nextI);
if (!Double.isNaN(azimuth))
output.append("<gh:azimuth>").append(Helper.round2(azimuth)).append("</gh:azimuth>");
if (instruction instanceof RoundaboutInstruction) {
RoundaboutInstruction ri = (RoundaboutInstruction) instruction;
output.append("<gh:exit_number>").append(ri.getExitNumber()).append("</gh:exit_number>");
}
output.append("<gh:sign>").append(instruction.getSign()).append("</gh:sign>");
output.append("</extensions>");
output.append("</rtept>");
}
/**
* Return the direction like 'NE' based on the first tracksegment of the instruction. If
* Instruction does not contain enough coordinate points, an empty string will be returned.
*/
public static String calcDirection(Instruction instruction, Instruction nextI) {
double azimuth = calcAzimuth(instruction, nextI);
if (Double.isNaN(azimuth))
return "";
return AC.azimuth2compassPoint(azimuth);
}
/**
* Return the azimuth in degree based on the first tracksegment of this instruction. If this
* instruction contains less than 2 points then NaN will be returned or the specified
* instruction will be used if that is the finish instruction.
*/
public static double calcAzimuth(Instruction instruction, Instruction nextI) {
double nextLat;
double nextLon;
if (instruction.getPoints().getSize() >= 2) {
nextLat = instruction.getPoints().getLatitude(1);
nextLon = instruction.getPoints().getLongitude(1);
} else if (nextI != null && instruction.getPoints().getSize() == 1) {
nextLat = nextI.getPoints().getLatitude(0);
nextLon = nextI.getPoints().getLongitude(0);
} else {
return Double.NaN;
}
<|fim▁hole|> double lat = instruction.getPoints().getLatitude(0);
double lon = instruction.getPoints().getLongitude(0);
return AC.calcAzimuth(lat, lon, nextLat, nextLon);
}
}<|fim▁end|> | |
<|file_name|>sql_test.go<|end_file_name|><|fim▁begin|>// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"reflect"
"testing"
"github.com/go-sql-driver/mysql"
)
func TestCreateMySQLConfig(t *testing.T) {
type args struct {
user string
password string
host string
port string
dbName string
mysqlGroupConcatMaxLen string
mysqlExtraParams map[string]string
}
tests := []struct {
name string
args args
want *mysql.Config
}{
{
name: "default config",
args: args{
user: "root",
host: "mysql",
port: "3306",
mysqlGroupConcatMaxLen: "1024",
mysqlExtraParams: nil,
},
want: &mysql.Config{
User: "root",
Net: "tcp",
Addr: "mysql:3306",
Params: map[string]string{"charset": "utf8", "parseTime": "True", "loc": "Local", "group_concat_max_len": "1024"},
AllowNativePasswords: true,
},
},
{
name: "extra parameters",
args: args{
user: "root",
host: "mysql",
port: "3306",
mysqlGroupConcatMaxLen: "1024",
mysqlExtraParams: map[string]string{"tls": "true"},
},
want: &mysql.Config{<|fim▁hole|> User: "root",
Net: "tcp",
Addr: "mysql:3306",
Params: map[string]string{"charset": "utf8", "parseTime": "True", "loc": "Local", "group_concat_max_len": "1024", "tls": "true"},
AllowNativePasswords: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CreateMySQLConfig(tt.args.user, tt.args.password, tt.args.host, tt.args.port, tt.args.dbName, tt.args.mysqlGroupConcatMaxLen, tt.args.mysqlExtraParams); !reflect.DeepEqual(got, tt.want) {
t.Errorf("CreateMySQLConfig() = %#v, want %v", got, tt.want)
}
})
}
}<|fim▁end|> | |
<|file_name|>sc27.js<|end_file_name|><|fim▁begin|>var sc1 = {
//funhouse mirror
setup:function(){
// videoSetup();
tree = new TREE();
tree.generate({
joints: [5,3,1,10],
divs: [1],
start: [0,0,2,0],
angles: [0,Math.PI/2,1],
length: [20,15,4,1],
rads: [1,2,1,3],
width: [1,2,2,1]
});
scene.add(tree);
tree.position.y=-50;
console.log(tree);
var ball = new THREE.SphereGeometry(15,15,15);
var ball2 = new THREE.Geometry();
tree.xform(tree.makeInfo([
[0,0,"all"],{ballGeo:ball,ballGeo2:ball2},
]),tree.setGeo);
tree.xform(tree.makeInfo([
[0,0,"all"],{ty:-15},
]),function(obj,args){obj.children[0].children[0].position.y=7.5;});
// scene.add(tree.makeTubes({minWidth:1,func:function(t){return Math.sin(t)*2}}));
},
draw:function(time){
time=time*3;
tree.position.y = -40+Math.sin(omouseY*Math.PI*4)*3;
tree.xform(tree.makeInfo([
[0,0,[1,5]],{rz:omouseX,ry:omouseY,sc:.9},
//legs
[0,0,0,[0,1],1],{rz:Math.PI/2},
[0,0,0,[0,1],1],{ry:omouseX*3},
[0,0,0,[0,1],2],{rx:omouseY*3},
//feet
[0,0,0,[0,1],0,0,0],{rz:0},
[0,0,0,[0,1],0,0,0],{rx:omouseY*3},
[0,0,[0,4],[0,1],0],{ty:-10},
[0,0,[1,4],[0,1],[1,2]],{rz:mouseY,freq:1,offMult:.2,off:time},
//fingers
[0,0,[1,4],[0,1],0,0,0,[0,2],"all"],{rz:0,freq:1,offMult:.2,off:time},
[0,0,[1,4],[0,1],0,0,0],{rz:0,freq:1,offMult:.3,off:time+.2},
//feet
[0,0,0,[0,1],0,0,0,[0,2],"all"],{ry:0,rz:omouseY*.1,sc:.9},
[0,0,0,0,0,0,0,[0,2],0],{sc:2,ry:-2*omouseY+1.5,rz:1},
[0,0,0,1,0,0,0,[0,2],0],{sc:2,ry:-2*omouseY-1.5,rz:1},
//toes
[0,0,0,0,0,0,0,[0,2],0],{sc:2,ry:0,freq:1,offMult:.2 ,offsetter2:.5},
[0,0,0,1,0,0,0,[0,2],0],{sc:2,ry:Math.PI-.3,freq:1,offMult:.2,offsetter2:.5},
]),tree.transform);
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>ReconnectExecutor.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2019 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.profiler.sender.grpc;
import com.navercorp.pinpoint.common.util.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* @author Woonduk Kang(emeroad)
*/
public class ReconnectExecutor {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private volatile boolean shutdown;
private final ScheduledExecutorService scheduledExecutorService;
private final AtomicLong rejectedCounter = new AtomicLong();
public ReconnectExecutor(ScheduledExecutorService scheduledExecutorService) {
this.scheduledExecutorService = Assert.requireNonNull(scheduledExecutorService, "scheduledExecutorService");
}
private void execute0(Runnable command) {
Assert.requireNonNull(command, "command");
if (shutdown) {
logger.debug("already shutdown");
return;
}
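// Re-schedule the reconnect job after its exponential backoff delay; rejected submissions are counted and logged.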
if (command instanceof ReconnectJob) {
ReconnectJob reconnectJob = (ReconnectJob) command;
try {
scheduledExecutorService.schedule(reconnectJob, reconnectJob.nextBackoffNanos(), TimeUnit.NANOSECONDS);
} catch (RejectedExecutionException e) {
final long failCount = rejectedCounter.incrementAndGet();
logger.info("{} reconnectJob scheduled fail {}", command, failCount);
}
} else {
throw new IllegalArgumentException("unknown command type " + command);
}
}
public void close() {
shutdown = true;
}
public Reconnector newReconnector(Runnable reconnectJob) {
Assert.requireNonNull(reconnectJob, "reconnectJob");
if (logger.isInfoEnabled()) {
logger.info("newReconnector(reconnectJob = [{}])", reconnectJob);
}
<|fim▁hole|> }
};
final ReconnectJob reconnectJobWrap = wrapReconnectJob(reconnectJob);
return new ReconnectAdaptor(dispatch, reconnectJobWrap);
}
private ReconnectJob wrapReconnectJob(Runnable runnable) {
return new ExponentialBackoffReconnectJob(runnable);
}
}<|fim▁end|> | final Executor dispatch = new Executor() {
@Override
public void execute(Runnable command) {
ReconnectExecutor.this.execute0(command); |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>import datetime
from django.db.models import Q
from django.http import HttpResponse, HttpResponseServerError, Http404, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.template import RequestContext
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.core.exceptions import MultipleObjectsReturned
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms.models import formset_factory, modelformset_factory, inlineformset_factory, BaseModelFormSet
from django.forms import ValidationError
import json as simplejson
from django.utils.datastructures import SortedDict
from django.contrib.auth.forms import UserCreationForm
from django.conf import settings
from django_rea.valueaccounting.models import *
from django_rea.board.forms import *
from django_rea.valueaccounting.views import get_agent
def default_context_agent():
return EconomicAgent.objects.get(id=3) #todo: BIG hack alert!!!!
#todo: a lot of this can be configured instead of hard-coded
def dhen_board(request, context_agent_id=None):
#import pdb; pdb.set_trace()
agent = get_agent(request)
pattern = ProcessPattern.objects.get(name="Herbs")
selected_resource_type = None
#filter_form = FilterForm(pattern=pattern, data=request.POST or None,)
if context_agent_id:
context_agent = EconomicAgent.objects.get(id=context_agent_id)
else:
context_agent = default_context_agent()
seller = EconomicAgent.objects.get(id=4) #todo: even worse hack!!
rec_extype = ExchangeType.objects.get(name="Purchase to Drying Site")
e_date = datetime.date.today()
init = {"start_date": e_date }
available_extype = ExchangeType.objects.get(name="Make Available")
available_form = AvailableForm(initial=init, exchange_type=available_extype, context_agent=context_agent, prefix="AVL")
init = {"event_date": e_date, "paid": "later", }
receive_form = ReceiveForm(initial=init, exchange_type=rec_extype, context_agent=context_agent, prefix="REC")
et = EventType.objects.get(name="Resource Production")
farm_stage = None
#harvester_stage = ExchangeType.objects.get(name="Farm to Harvester")
dryer_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
seller_stage = ExchangeType.objects.get(name="Drying Site to Seller")
rts = pattern.get_resource_types(event_type=et)
for rt in rts:
init = {"event_date": e_date,}
rt.farm_commits = rt.commits_for_exchange_stage(stage=farm_stage)
for com in rt.farm_commits:
if com.start_date > e_date:
com.future = True
prefix = com.form_prefix()
qty_help = " ".join([com.unit_of_quantity.abbrev, ", up to 2 decimal places"])
com.transfer_form = ExchangeFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="DryingSite", context_agent=context_agent, prefix=prefix)
com.zero_form = ZeroOutForm(prefix=prefix)
com.lot_form = NewResourceForm(prefix=prefix)
com.multiple_formset = create_exchange_formset(context_agent=context_agent, assoc_type_identifier="Harvester", prefix=prefix)
rt.dryer_resources = rt.onhand_for_exchange_stage(stage=dryer_stage)
init = {"event_date": e_date, "paid": "later"}
for res in rt.dryer_resources:
prefix = res.form_prefix()
qty_help = " ".join([res.unit_of_quantity().abbrev, ", up to 2 decimal places"])
res.transfer_form = TransferFlowForm(initial=init, qty_help=qty_help, assoc_type_identifier="Seller", context_agent=context_agent, prefix=prefix)
rt.seller_resources = rt.onhand_for_exchange_stage(stage=seller_stage)
if rt.seller_resources:
init_rt = {"event_date": e_date,}
rt.combine_form = CombineResourcesForm(prefix = rt.form_prefix(), initial=init_rt, resource_type=rt, stage=seller_stage)
return render_to_response("board/dhen_board.html", {
"agent": agent,
"context_agent": context_agent,
"seller": seller,
"available_form": available_form,
"receive_form": receive_form,
#"filter_form": filter_form,
"resource_types": rts,
"available_extype": available_extype,
}, context_instance=RequestContext(request))
@login_required
def add_available(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
form = AvailableForm(data=request.POST, prefix="AVL")
if form.is_valid():
commit = form.save(commit=False)
commit.event_type = EventType.objects.get(name="Give")
commit.to_agent = context_agent
commit.context_agent = context_agent
commit.due_date = commit.start_date
commit.commitment_date = commit.start_date
commit.unit_of_quantity = commit.resource_type.unit
commit.exchange_stage = None
commit.created_by = request.user
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def receive_directly(request, context_agent_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
exchange_type = ExchangeType.objects.get(name="Purchase to Drying Site") #todo: odd to have stage different....
form = ReceiveForm(data=request.POST, prefix="REC")
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
identifier = data["identifier"]
from_agent = data["from_agent"]
to_agent = data["to_agent"]
resource_type = data["resource_type"]
quantity = data["quantity"]
description = data["description"]
paid = data["paid"]
value = data["value"]
unit_of_value = data["unit_of_value"]
receive_et = EventType.objects.get(name="Receive")
give_et = EventType.objects.get(name="Give")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
exchange = Exchange(
name="Purchase " + resource_type.name + " from " + from_agent.nick,
use_case=UseCase.objects.get(identifier="supply_xfer"),
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=quantity,
exchange_stage=stage,
notes=description,
created_by=request.user
)
resource.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
xfer.save()
event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource_type,
transfer = xfer,
exchange_stage=stage,
from_agent = from_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource_type.unit,
value = value,
unit_of_value = unit_of_value,
description=description,
created_by = request.user,
)
event.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = pay_xfer,
exchange_stage=stage,
from_agent = event.to_agent,
to_agent = event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource_type.name
pay_xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
notes = description,
created_by = request.user
)
pay_xfer.save()
commit = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=pay_xfer,
exchange_stage=stage,
due_date=event_date,
from_agent=event.to_agent,
to_agent=event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
def create_exchange_formset(context_agent, assoc_type_identifier, prefix, data=None):
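# Build a formset of breakout rows whose "to_agent" choices are limited to the
# context agent's associates of the given association type.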
ExchangeFormSet = formset_factory(MultipleExchangeEventForm, extra=10)
#init = {"paid": "paid"}
formset = ExchangeFormSet(data=data, prefix=prefix)
to_agents = context_agent.all_has_associates_by_type(assoc_type_identifier=assoc_type_identifier)
for form in formset:
#id = int(form["facet_id"].value())
form.fields["to_agent"].queryset = to_agents
form.fields["paid_stage_1"].initial = "never"
form.fields["paid_stage_2"].initial = "later"
return formset
#todo: hardcoded recipe and exchange types
def get_next_stage(exchange_type=None):
if not exchange_type:
next_stage = ExchangeType.objects.get(name="Farm to Harvester")
elif exchange_type.name == "Farm to Harvester":
next_stage = ExchangeType.objects.get(name="Harvester to Drying Site")
elif exchange_type.name == "Harvester to Drying Site":
next_stage = ExchangeType.objects.get(name="Drying Site to Seller")
else:
next_stage = None
return next_stage
@login_required
def purchase_resource(request, context_agent_id, commitment_id): #this is the farm > harvester > drying site, confusing name
if request.method == "POST":
#import pdb; pdb.set_trace()
commitment = get_object_or_404(Commitment, id=commitment_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = None
next_stage = get_next_stage(stage)
next_next_stage = get_next_stage(next_stage)
prefix = commitment.form_prefix()
form = ExchangeFlowForm(prefix=prefix, data=request.POST)
lot_form = NewResourceForm(prefix=prefix, data=request.POST)
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
#import pdb; pdb.set_trace()
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
if zero_out == True:
commitment.finished = True
commitment.save()
if form.is_valid() and lot_form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
unit_of_value = data["unit_of_value"]
notes = data["notes"]
lot_data = lot_form.cleaned_data
identifier = lot_data["identifier"]
purch_use_case = UseCase.objects.get(identifier="supply_xfer")
purch_exchange_type = ExchangeType.objects.get(name="Farm to Harvester")
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
xfer_exchange_type = ExchangeType.objects.get(name="Harvester to Drying Site")
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
formset = create_exchange_formset(prefix=prefix, data=request.POST, context_agent=context_agent, assoc_type_identifier="Harvester")
quantity = 0
ces = []
#import pdb; pdb.set_trace()
for form_ee in formset.forms:
if form_ee.is_valid():
data_ee = form_ee.cleaned_data
breakout_to_agent = data_ee["to_agent"]
if breakout_to_agent:
breakout_quantity = data_ee["quantity"]
quantity += breakout_quantity
value_stage_1 = data_ee["value_stage_1"]
paid_stage_1 = data_ee["paid_stage_1"]
value_stage_2 = data_ee["value_stage_2"]
paid_stage_2 = data_ee["paid_stage_2"]
exchange = Exchange(
name="Transfer " + commitment.resource_type.name + " from farm",
use_case=purch_use_case,
exchange_type=purch_exchange_type,
start_date=event_date,
context_agent=context_agent,
created_by=request.user,
)
exchange.save()
resource = EconomicResource(
identifier=commitment.resource_type.name + " from farm",
resource_type=commitment.resource_type,
quantity=0,
exchange_stage=next_next_stage,
created_by=request.user
)
resource.save()
transfer_type = purch_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
receipt_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_stage,
transfer=xfer,
commitment=commitment,
from_agent = commitment.from_agent,
to_agent = breakout_to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
receipt_event.save()
if paid_stage_1 == "paid":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_1 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
exchange_stage=next_stage,
transfer=xfer,
from_agent = receipt_event.to_agent,
to_agent = receipt_event.from_agent,
context_agent = context_agent,
quantity = value_stage_1,
unit_of_quantity = unit_of_value,
value = value_stage_1,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_1.save()
elif paid_stage_1 == "later":
if value_stage_1 > 0:
transfer_type = purch_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_1 = Commitment (
commitment_date=event_date,
event_type=give_et,
exchange_stage=next_stage,
transfer=xfer,
due_date=event_date,
from_agent=receipt_event.to_agent,
to_agent=receipt_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_1,
unit_of_quantity=unit_of_value,
value=value_stage_1,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_1.save()
xfer_exchange = Exchange(
name="Transfer " + commitment.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=xfer_exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = xfer_exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event.save()
xfer_event_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
transfer=xfer,
from_agent = breakout_to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_event_receive.save()
if paid_stage_2 == "paid":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event_2 = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2.save()
pay_event_2_receive = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer = xfer,
exchange_stage=next_next_stage,
from_agent = xfer_event.to_agent,
to_agent = xfer_event.from_agent,
context_agent = context_agent,
quantity = value_stage_2,
unit_of_quantity = unit_of_value,
value = value_stage_2,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event_2_receive.save()
elif paid_stage_2 == "later":
if value_stage_2 > 0:
transfer_type = xfer_exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + commitment.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit_2 = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_next_stage,
due_date=event_date,
from_agent=xfer_event.to_agent,
to_agent=xfer_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value_stage_2,
unit_of_quantity=unit_of_value,
value=value_stage_2,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit_2.save()
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
exchange_stage=next_next_stage,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = breakout_quantity,
unit_of_quantity = resource.resource_type.unit,
created_by = request.user,
)
consume_event.save()
ces.append(consume_event)
process = Process(
name="Combined harvested: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Into Drying Room"),
created_by=request.user,
)
process.save()
for ce in ces:
ce.process = process
ce.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=commitment.resource_type,
quantity=quantity,
exchange_stage=next_next_stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=next_next_stage,
process = process,
from_agent = to_agent,
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,<|fim▁hole|>
#todo: put skip stage here!
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def transfer_resource(request, context_agent_id, resource_id): #this is drying site to seller
if request.method == "POST":
#import pdb; pdb.set_trace()
resource = get_object_or_404(EconomicResource, id=resource_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Harvester to Drying Site")
next_stage = get_next_stage(stage)
prefix = resource.form_prefix()
form = TransferFlowForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
to_agent = data["to_agent"]
quantity = data["quantity"]
value = data["value"]
if not value:
value = 0
unit_of_value = data["unit_of_value"]
paid = data["paid"]
notes = data["notes"]
xfer_use_case = UseCase.objects.get(identifier="intrnl_xfer")
exchange_type = next_stage
give_et = EventType.objects.get(name="Give")
receive_et = EventType.objects.get(name="Receive")
pay_rt = EconomicResourceType.objects.filter(unit__unit_type="value")[0]
#import pdb; pdb.set_trace()
xfer_exchange = Exchange(
name="Transfer " + resource.resource_type.name,
use_case=xfer_use_case,
start_date=event_date,
context_agent=context_agent,
exchange_type=exchange_type,
created_by=request.user,
)
xfer_exchange.save()
transfer_type = exchange_type.transfer_types_non_reciprocal()[0]
xfer_name = transfer_type.name + " of " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
xfer_give_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_give_event.save()
xfer_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource = resource,
resource_type = resource.resource_type,
transfer=xfer,
exchange_stage=next_stage,
from_agent = resource.owner_based_on_exchange(),
to_agent = to_agent,
context_agent = context_agent,
quantity = quantity,
unit_of_quantity = resource.resource_type.unit,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
xfer_rec_event.save()
resource.exchange_stage = next_stage
resource.quantity = quantity
if resource.notes:
resource.notes = resource.notes + " ------- " + notes
else:
resource.notes = notes
resource.save()
if paid == "paid":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
pay_event = EconomicEvent(
event_type = give_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_event.save()
pay_rec_event = EconomicEvent(
event_type = receive_et,
event_date = event_date,
resource_type = pay_rt,
transfer=xfer,
exchange_stage=next_stage,
from_agent = xfer_give_event.to_agent,
to_agent = xfer_give_event.from_agent,
context_agent = context_agent,
quantity = value,
unit_of_quantity = unit_of_value,
value = value,
unit_of_value = unit_of_value,
created_by = request.user,
)
pay_rec_event.save()
elif paid == "later":
if value > 0:
transfer_type = exchange_type.transfer_types_reciprocal()[0]
xfer_name = transfer_type.name + " for " + resource.resource_type.name
xfer = Transfer(
name=xfer_name,
transfer_type = transfer_type,
exchange = xfer_exchange,
context_agent = context_agent,
transfer_date = event_date,
created_by = request.user
)
xfer.save()
commit = Commitment (
commitment_date=event_date,
event_type=give_et,
transfer=xfer,
exchange_stage=next_stage,
due_date=event_date,
from_agent=xfer_give_event.to_agent,
to_agent=xfer_give_event.from_agent,
context_agent=context_agent,
resource_type=pay_rt,
quantity=value,
unit_of_quantity=unit_of_value,
value=value,
unit_of_value=unit_of_value,
created_by=request.user,
)
commit.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
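# Consume the selected on-hand lots of a resource type and produce a single combined lot through a "Combine Lots" process.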
def combine_resources(request, context_agent_id, resource_type_id):
if request.method == "POST":
#import pdb; pdb.set_trace()
resource_type = get_object_or_404(EconomicResourceType, id=resource_type_id)
context_agent = EconomicAgent.objects.get(id=context_agent_id)
stage = ExchangeType.objects.get(name="Drying Site to Seller") #actually the stage here should be the process stage, and the rest should handle that
prefix = resource_type.form_prefix()
form = CombineResourcesForm(prefix=prefix, data=request.POST)
if form.is_valid():
data = form.cleaned_data
event_date = data["event_date"]
resources = data["resources"]
identifier = data["identifier"]
notes = data["notes"]
proc_use_case = UseCase.objects.get(identifier="rand")
proc_pattern = None
proc_patterns = [puc.pattern for puc in proc_use_case.patterns.all()]
if proc_patterns:
proc_pattern = proc_patterns[0]
consume_et = EventType.objects.get(name="Resource Consumption")
produce_et = EventType.objects.get(name="Resource Production")
if resources:
process = Process(
name="Combined: new lot",
process_pattern=proc_pattern,
end_date=event_date,
start_date=event_date,
started=event_date,
context_agent=context_agent,
finished=True,
process_type=ProcessType.objects.get(name="Combine Lots"),
created_by=request.user,
)
process.save()
qty = 0
for res in resources:
consume_event = EconomicEvent(
event_type = consume_et,
event_date = event_date,
resource = res,
resource_type = res.resource_type,
process=process,
exchange_stage=stage,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = res.quantity,
unit_of_quantity = res.resource_type.unit,
created_by = request.user,
)
consume_event.save()
qty += res.quantity
res.quantity = 0
res.save()
prod_resource = EconomicResource(
identifier=identifier,
resource_type=resource_type,
quantity=qty,
exchange_stage=stage,
notes=notes,
created_by=request.user
)
prod_resource.save()
prod_event = EconomicEvent(
event_type = produce_et,
event_date = event_date,
resource = prod_resource,
resource_type = prod_resource.resource_type,
exchange_stage=stage,
process = process,
from_agent = res.owner_based_on_exchange(),
to_agent = res.owner_based_on_exchange(),
context_agent = context_agent,
quantity = qty,
unit_of_quantity = prod_resource.resource_type.unit,
description=notes,
created_by = request.user,
)
prod_event.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def change_available(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if request.method == "POST":
prefix = commitment.form_prefix()
form = CommitmentForm(instance=commitment, data=request.POST, prefix=prefix)
if form.is_valid():
data = form.cleaned_data
form.save()
commitment.unit_of_quantity = commitment.resource_type.unit
commitment.save()
zero_form = ZeroOutForm(prefix=prefix, data=request.POST)
if zero_form.is_valid():
zero_data = zero_form.cleaned_data
zero_out = zero_data["zero_out"]
if zero_out == True:
commitment.finished = True
commitment.save()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def delete_farm_commitment(request, commitment_id):
commitment = get_object_or_404(Commitment, pk=commitment_id)
context_agent_id = commitment.context_agent.id
if commitment.is_deletable():
commitment.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col2(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
for item in flows:
if item.class_label() == "Economic Event":
if item.commitment:
commit = item.commitment
commit.finished = False
commit.save()
item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))
@login_required
def undo_col3(request, resource_id):
resource = get_object_or_404(EconomicResource, pk=resource_id)
context_agent_id = default_context_agent().id
#import pdb; pdb.set_trace()
flows = resource.incoming_value_flows()
#todo: I'm not sure how to delete the right rows without going too far back in the chain......
#for item in flows:
# if item.class_label() == "Economic Event":
# item.delete()
return HttpResponseRedirect('/%s/%s/'
% ('board/dhen-board', context_agent_id))<|fim▁end|> | created_by = request.user,
)
prod_event.save() |
<|file_name|>ssh.js<|end_file_name|><|fim▁begin|>var Bluebird = require('bluebird');<|fim▁hole|>var connection = require('./connection');
var errors = require('./errors');
var mockOptions = {};
var offlineMode = false;
module.exports.errors = errors;
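// Open a real SSH connection; throws if offline mode has been enabled.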
module.exports.connect = function connect(options) {
if (offlineMode) {
throw new Error('Real connections to ' + options.host + ' are not allowed in offline mode');
}
return new connection.Connection(options).connect();
};
module.exports.connectMock = function connectMock(options) {
return new connection.MockConnection(options, mockOptions).connect();
};
module.exports.setMockOptions = function setMockOptions(options) {
mockOptions = options;
};
module.exports.setOfflineMode = function setOfflineMode(value) {
offlineMode = !!value;
};<|fim▁end|> | |
<|file_name|>Markers.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2008 Ayman Al-Sairafi [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License
* at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jsyntaxpane.components;
import jsyntaxpane.actions.*;
import java.awt.Color;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.swing.text.BadLocationException;
import javax.swing.text.DefaultHighlighter;
import javax.swing.text.Highlighter;
import javax.swing.text.JTextComponent;
import jsyntaxpane.SyntaxDocument;
import jsyntaxpane.Token;
/**
* This class contains static utility methods to make highlighting in text
* components easier.
*
* @author Ayman Al-Sairafi
*/
public class Markers {
// This subclass is used in our highlighting code
public static class SimpleMarker extends DefaultHighlighter.DefaultHighlightPainter {
public SimpleMarker(Color color) {
super(color);
}
}
/**
* Removes only our private highlights
* This is public so that we can remove the highlights when the editorKit
* is unregistered. SimpleMarker can be null, in which case all instances of
* our Markers are removed.
* @param component the text component whose markers are to be removed
* @param marker the SimpleMarker to remove
*/
public static void removeMarkers(JTextComponent component, SimpleMarker marker) {
Highlighter hilite = component.getHighlighter();
Highlighter.Highlight[] hilites = hilite.getHighlights();
for (int i = 0; i < hilites.length; i++) {
if (hilites[i].getPainter() instanceof SimpleMarker) {<|fim▁hole|> }
}
}
/**
* Remove all the markers from an JEditorPane
* @param editorPane
*/
public static void removeMarkers(JTextComponent editorPane) {
removeMarkers(editorPane, null);
}
/**
* add highlights for the given Token on the given pane
* @param pane
* @param token
* @param marker
*/
public static void markToken(JTextComponent pane, Token token, SimpleMarker marker) {
markText(pane, token.start, token.end(), marker);
}
/**
* add highlights for the given region on the given pane
* @param pane
* @param start
* @param end
* @param marker
*/
public static void markText(JTextComponent pane, int start, int end, SimpleMarker marker) {
try {
Highlighter hiliter = pane.getHighlighter();
int selStart = pane.getSelectionStart();
int selEnd = pane.getSelectionEnd();
// if there is no selection or selection does not overlap
if(selStart == selEnd || end < selStart || start > selStart) {
hiliter.addHighlight(start, end, marker);
return;
}
// selection starts within the highlight, highlight the part before the selection
if(selStart > start && selStart < end ) {
hiliter.addHighlight(start, selStart, marker);
}
// selection ends within the highlight, highlight remaining
if(selEnd > start && selEnd < end ) {
hiliter.addHighlight(selEnd, end, marker);
}
} catch (BadLocationException ex) {
// nothing we can do if the request is out of bound
LOG.log(Level.SEVERE, null, ex);
}
}
/**
* Mark all text in the document that matches the given pattern
* @param pane control to use
* @param pattern pattern to match
* @param marker marker to use for highlighting
*/
public static void markAll(JTextComponent pane, Pattern pattern, SimpleMarker marker) {
SyntaxDocument sDoc = ActionUtils.getSyntaxDocument(pane);
if(sDoc == null || pattern == null) {
return;
}
Matcher matcher = sDoc.getMatcher(pattern);
// we may not have any matcher (due to undo or something, so don't do anything.
if(matcher==null) {
return;
}
while(matcher.find()) {
markText(pane, matcher.start(), matcher.end(), marker);
}
}
private static final Logger LOG = Logger.getLogger(Markers.class.getName());
}<|fim▁end|> | SimpleMarker hMarker = (SimpleMarker) hilites[i].getPainter();
if (marker == null || hMarker.equals(marker)) {
hilite.removeHighlight(hilites[i]);
} |
<|file_name|>test_plugin_slurm.py<|end_file_name|><|fim▁begin|># pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime
from reporting.plugins.slurm import SlurmInput
class SlurmTestCase(unittest.TestCase):
"""Test cases for slurm module"""
def test_all_heros(self):
"""Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
for job in data['jobs']:
self.assertTrue(job['user'].startswith('hero'))
def test_get_data(self):
"""Slurm plugin: get_data method should return a message in correct structure"""
slurm_input = SlurmInput(path='tests/sacct-with-start-end.txt')
data = slurm_input.get_data()
self.assertIn('hostname', data)
self.assertIn('timestamp', data)
self.assertIn('jobs', data)
self.assertTrue(isinstance(data['jobs'], list))
job = data['jobs'][0]
for required_key in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
self.assertIn(required_key, job)
def test_read_data(self):
"""Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
data = SlurmInput._read_data('tests/sacct-with-start-end.txt')
qualified_count = len(data)
for message in data:<|fim▁hole|> self.assertEqual(qualified_count, 0)
def test_convert_to_timestamp(self):
"""Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
reference = datetime.utcnow().strftime(ISO_FORMAT)
converted = datetime.utcfromtimestamp(
SlurmInput._convert_to_timestamp(reference)).strftime(ISO_FORMAT)
self.assertEqual(reference, converted)<|fim▁end|> | if 'user' in message and len(message['user'].strip()):
qualified_count -= 1 |
<|file_name|>test3.rs<|end_file_name|><|fim▁begin|>extern crate blurz;
use std::error::Error;
use std::time::Duration;
use std::thread;
use blurz::bluetooth_adapter::BluetoothAdapter as Adapter;
use blurz::bluetooth_device::BluetoothDevice as Device;
use blurz::bluetooth_discovery_session::BluetoothDiscoverySession as DiscoverySession;
fn test3() -> Result<(), Box<Error>> {
let adapter: Adapter = try!(Adapter::init());
try!(adapter.set_powered(true));
loop {
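// Each pass: run discovery for about a second, print every device found, then remove it from the adapter.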
let session = try!(DiscoverySession::create_session(adapter.get_id()));
thread::sleep(Duration::from_millis(200));
try!(session.start_discovery());
thread::sleep(Duration::from_millis(800));
let devices = try!(adapter.get_device_list());
println!("{} device(s) found", devices.len());
'device_loop: for d in devices {
let device = Device::new(d.clone());
println!("{} {:?} {:?}", device.get_id(), device.get_address(),device.get_rssi());
try!(adapter.remove_device(device.get_id()));
}
try!(session.stop_discovery());
}
}
fn main() {<|fim▁hole|> Ok(_) => (),
Err(e) => println!("{:?}", e),
}
}<|fim▁end|> | match test3() { |
<|file_name|>gyp_flag_compare.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given the output of -t commands from a ninja build for a gyp and GN generated
build, report on differences between the command lines."""
import os
import shlex
import subprocess
import sys
# Must be in src/.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
g_total_differences = 0
def FindAndRemoveArgWithValue(command_line, argname):
"""Given a command line as a list, remove and return the value of an option
that takes a value as a separate entry.
Modifies |command_line| in place.
"""
if argname not in command_line:
return ''
location = command_line.index(argname)
value = command_line[location + 1]
command_line[location:location + 2] = []
return value
def MergeSpacedArgs(command_line, argname):
"""Combine all arguments |argname| with their values, separated by a space."""
i = 0
result = []
while i < len(command_line):
arg = command_line[i]
if arg == argname:
result.append(arg + ' ' + command_line[i + 1])
i += 1
else:
result.append(arg)
i += 1
return result
def NormalizeSymbolArguments(command_line):
"""Normalize -g arguments.
If there's no -g args, it's equivalent to -g0. -g2 is equivalent to -g.
Modifies |command_line| in place.
"""
# Strip -g0 if there's no symbols.
have_some_symbols = False
for x in command_line:
if x.startswith('-g') and x != '-g0':
have_some_symbols = True
if not have_some_symbols and '-g0' in command_line:
command_line.remove('-g0')
# Rename -g2 to -g.
if '-g2' in command_line:
command_line[command_line.index('-g2')] = '-g'
def GetFlags(lines):
"""Turn a list of command lines into a semi-structured dict."""
flags_by_output = {}
for line in lines:
# TODO(scottmg): Hacky way of getting only cc for now.
if 'clang' not in line:
continue
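# Drop the compiler executable itself and keep only its arguments.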
command_line = shlex.split(line.strip())[1:]
output_name = FindAndRemoveArgWithValue(command_line, '-o')
dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
NormalizeSymbolArguments(command_line)
command_line = MergeSpacedArgs(command_line, '-Xclang')
defines = [x for x in command_line if x.startswith('-D')]
include_dirs = [x for x in command_line if x.startswith('-I')]
dash_f = [x for x in command_line if x.startswith('-f')]
warnings = [x for x in command_line if x.startswith('-W')]
cc_file = [x for x in command_line if x.endswith('.cc') or
x.endswith('.c') or
x.endswith('.cpp')]
if len(cc_file) != 1:
print 'Skipping %s' % command_line
continue
assert len(cc_file) == 1
others = [x for x in command_line if x not in defines and \
x not in include_dirs and \
x not in dash_f and \
x not in warnings and \
x not in cc_file]
# Filter for libFindBadConstructs.so having a relative path in one and
# absolute path in the other.
others_filtered = []
for x in others:
if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
others_filtered.append(
'-Xclang ' +
os.path.join(os.getcwd(),
os.path.normpath(
os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
elif x.startswith('-B'):
others_filtered.append(
'-B' +
os.path.join(os.getcwd(),
os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
else:
others_filtered.append(x)
others = others_filtered
flags_by_output[cc_file[0]] = {
'output': output_name,
'depname': dep_name,
'defines': sorted(defines),
'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
'dash_f': sorted(dash_f),
'warnings': sorted(warnings),
'other': sorted(others),
}
return flags_by_output
def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
"""Return a report of any differences between gyp and gn lists, ignoring
anything in |dont_care_{gyp|gn}| respectively."""
global g_total_differences
if not dont_care_gyp:
dont_care_gyp = []
if not dont_care_gn:
dont_care_gn = []
output = ''
if gyp[name] != gn[name]:
gyp_set = set(gyp[name])
gn_set = set(gn[name])
missing_in_gyp = gyp_set - gn_set
missing_in_gn = gn_set - gyp_set
missing_in_gyp -= set(dont_care_gyp)
missing_in_gn -= set(dont_care_gn)
if missing_in_gyp or missing_in_gn:
output += ' %s differ:\n' % name
if missing_in_gyp:
output += ' In gyp, but not in GN:\n %s' % '\n '.join(
sorted(missing_in_gyp)) + '\n'
g_total_differences += len(missing_in_gyp)
if missing_in_gn:
output += ' In GN, but not in gyp:\n %s' % '\n '.join(
sorted(missing_in_gn)) + '\n\n'
g_total_differences += len(missing_in_gn)
return output
def Run(command_line):
"""Run |command_line| as a subprocess and return stdout. Raises on error."""
return subprocess.check_output(command_line, shell=True)
def main():
if len(sys.argv) != 2 and len(sys.argv) != 3:
print 'usage: %s gyp_target gn_target' % __file__
print ' or: %s target' % __file__
return 1
if len(sys.argv) == 2:
sys.argv.append(sys.argv[1])
print >>sys.stderr, 'Regenerating...'
# Currently only Release, non-component.
Run('gn gen out/gn_flags --args="is_debug=false is_component_build=false"')
os.environ.pop('GYP_DEFINES', None)
Run('python build/gyp_chromium -Goutput_dir=out_gyp_flags -Gconfig=Release')
gn = Run('ninja -C out/gn_flags -t commands %s' % sys.argv[2])
gyp = Run('ninja -C out_gyp_flags/Release -t commands %s' % sys.argv[1])
all_gyp_flags = GetFlags(gyp.splitlines())
all_gn_flags = GetFlags(gn.splitlines())
gyp_files = set(all_gyp_flags.keys())
gn_files = set(all_gn_flags.keys())
different_source_list = gyp_files != gn_files
if different_source_list:
    print 'Different set of source files:'
print ' In gyp, not in GN:\n %s' % '\n '.join(
sorted(gyp_files - gn_files))
print ' In GN, not in gyp:\n %s' % '\n '.join(<|fim▁hole|> for filename in sorted(file_list):
gyp_flags = all_gyp_flags[filename]
gn_flags = all_gn_flags[filename]
differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
differences += CompareLists(gyp_flags, gn_flags, 'defines')
differences += CompareLists(gyp_flags, gn_flags, 'include_dirs')
differences += CompareLists(gyp_flags, gn_flags, 'warnings', dont_care_gn=[
# More conservative warnings in GN we consider to be OK.
'-Wendif-labels',
'-Wextra',
'-Wsign-compare',
])
differences += CompareLists(gyp_flags, gn_flags, 'other')
if differences:
files_with_given_differences.setdefault(differences, []).append(filename)
for diff, files in files_with_given_differences.iteritems():
print '\n'.join(sorted(files))
print diff
print 'Total differences:', g_total_differences
# TODO(scottmg): Return failure on difference once we're closer to identical.
return 0
if __name__ == '__main__':
sys.exit(main())<|fim▁end|> | sorted(gn_files - gyp_files))
print '\nNote that flags will only be compared for files in both sets.\n'
file_list = gyp_files & gn_files
files_with_given_differences = {} |
<|file_name|>_visualizer.py<|end_file_name|><|fim▁begin|># ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
import os
import pkg_resources
import shutil
from urllib.parse import quote
import scipy
import numpy as np
import pandas as pd
import qiime2
from statsmodels.sandbox.stats.multicomp import multipletests
import q2templates
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_alpha')
def alpha_group_significance(output_dir: str, alpha_diversity: pd.Series,
metadata: qiime2.Metadata) -> None:
metadata_df = metadata.to_dataframe()
metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
pre_filtered_cols = set(metadata_df.columns)
metadata_df = metadata_df.select_dtypes(exclude=[np.number])
post_filtered_cols = set(metadata_df.columns)
filtered_numeric_categories = pre_filtered_cols - post_filtered_cols
filtered_group_comparisons = []
categories = metadata_df.columns
metric_name = alpha_diversity.name
if len(categories) == 0:
raise ValueError('Only numeric data is present in metadata file.')
filenames = []
filtered_categories = []
for category in categories:
metadata_category = metadata.get_category(category).to_series()
metadata_category = metadata_category.loc[alpha_diversity.index]
metadata_category = metadata_category.replace(r'', np.nan).dropna()
initial_data_length = alpha_diversity.shape[0]
data = pd.concat([alpha_diversity, metadata_category], axis=1,
join='inner')
filtered_data_length = data.shape[0]
names = []
groups = []
for name, group in data.groupby(metadata_category.name):
names.append('%s (n=%d)' % (name, len(group)))
groups.append(list(group[alpha_diversity.name]))
if (len(groups) > 1 and len(groups) != len(data.index)):
escaped_category = quote(category)
filename = 'category-%s.jsonp' % escaped_category
filenames.append(filename)
# perform Kruskal-Wallis across all groups
kw_H_all, kw_p_all = scipy.stats.mstats.kruskalwallis(*groups)
# perform pairwise Kruskal-Wallis across all pairs of groups and
# correct for multiple comparisons
kw_H_pairwise = []
for i in range(len(names)):
for j in range(i):
try:
H, p = scipy.stats.mstats.kruskalwallis(groups[i],
groups[j])
kw_H_pairwise.append([names[j], names[i], H, p])
except ValueError:<|fim▁hole|> kw_H_pairwise, columns=['Group 1', 'Group 2', 'H', 'p-value'])
kw_H_pairwise.set_index(['Group 1', 'Group 2'], inplace=True)
kw_H_pairwise['q-value'] = multipletests(
kw_H_pairwise['p-value'], method='fdr_bh')[1]
kw_H_pairwise.sort_index(inplace=True)
pairwise_fn = 'kruskal-wallis-pairwise-%s.csv' % escaped_category
pairwise_path = os.path.join(output_dir, pairwise_fn)
kw_H_pairwise.to_csv(pairwise_path)
with open(os.path.join(output_dir, filename), 'w') as fh:
df = pd.Series(groups, index=names)
fh.write("load_data('%s'," % category)
df.to_json(fh, orient='split')
fh.write(",")
json.dump({'initial': initial_data_length,
'filtered': filtered_data_length}, fh)
fh.write(",")
json.dump({'H': kw_H_all, 'p': kw_p_all}, fh)
fh.write(",'")
table = kw_H_pairwise.to_html(classes="table table-striped "
"table-hover")
table = table.replace('border="1"', 'border="0"')
fh.write(table.replace('\n', ''))
fh.write("','%s', '%s');" % (quote(pairwise_fn), metric_name))
else:
filtered_categories.append(category)
index = os.path.join(
TEMPLATES, 'alpha_group_significance_assets', 'index.html')
q2templates.render(index, output_dir, context={
'categories': [quote(fn) for fn in filenames],
'filtered_numeric_categories': ', '.join(filtered_numeric_categories),
'filtered_categories': ', '.join(filtered_categories),
'filtered_group_comparisons':
'; '.join([' vs '.join(e) for e in filtered_group_comparisons])})
shutil.copytree(
os.path.join(TEMPLATES, 'alpha_group_significance_assets', 'dst'),
os.path.join(output_dir, 'dist'))
_alpha_correlation_fns = {'spearman': scipy.stats.spearmanr,
'pearson': scipy.stats.pearsonr}
def alpha_correlation(output_dir: str,
alpha_diversity: pd.Series,
metadata: qiime2.Metadata,
method: str='spearman') -> None:
try:
alpha_correlation_fn = _alpha_correlation_fns[method]
except KeyError:
raise ValueError('Unknown alpha correlation method %s. The available '
'options are %s.' %
(method, ', '.join(_alpha_correlation_fns.keys())))
metadata_df = metadata.to_dataframe()
metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
pre_filtered_cols = set(metadata_df.columns)
metadata_df = metadata_df.select_dtypes(include=[np.number])
post_filtered_cols = set(metadata_df.columns)
filtered_categories = pre_filtered_cols - post_filtered_cols
categories = metadata_df.columns
if len(categories) == 0:
raise ValueError('Only non-numeric data is present in metadata file.')
filenames = []
for category in categories:
metadata_category = metadata_df[category]
metadata_category = metadata_category.loc[alpha_diversity.index]
metadata_category = metadata_category.dropna()
# create a dataframe containing the data to be correlated, and drop
# any samples that have no data in either column
df = pd.concat([metadata_category, alpha_diversity], axis=1,
join='inner')
# compute correlation
correlation_result = alpha_correlation_fn(df[metadata_category.name],
df[alpha_diversity.name])
warning = None
if alpha_diversity.shape[0] != df.shape[0]:
warning = {'initial': alpha_diversity.shape[0],
'method': method.title(),
'filtered': df.shape[0]}
escaped_category = quote(category)
filename = 'category-%s.jsonp' % escaped_category
filenames.append(filename)
with open(os.path.join(output_dir, filename), 'w') as fh:
fh.write("load_data('%s'," % category)
df.to_json(fh, orient='split')
fh.write(",")
json.dump(warning, fh)
fh.write(",")
json.dump({
'method': method.title(),
'testStat': '%1.4f' % correlation_result[0],
'pVal': '%1.4f' % correlation_result[1],
'sampleSize': df.shape[0]}, fh)
fh.write(");")
index = os.path.join(TEMPLATES, 'alpha_correlation_assets', 'index.html')
q2templates.render(index, output_dir, context={
'categories': [quote(fn) for fn in filenames],
'filtered_categories': ', '.join(filtered_categories)})
shutil.copytree(os.path.join(TEMPLATES, 'alpha_correlation_assets', 'dst'),
os.path.join(output_dir, 'dist'))<|fim▁end|> | filtered_group_comparisons.append(
['%s:%s' % (category, names[i]),
'%s:%s' % (category, names[j])])
kw_H_pairwise = pd.DataFrame( |
<|file_name|>PredicateWordRule.java<|end_file_name|><|fim▁begin|>/*
* Created on May 13, 2003
*========================================================================
* Modifications history
*========================================================================
* $Log: PredicateWordRule.java,v $
* Revision 1.2 2003/05/30 20:53:09 agfitzp
* 0.0.2 : Outlining is now done as the user types. Some other bug fixes.
*
*========================================================================
*/
package net.sourceforge.jseditor.editors;
import org.eclipse.jface.text.rules.ICharacterScanner;
import org.eclipse.jface.text.rules.IPredicateRule;
import org.eclipse.jface.text.rules.IToken;
import org.eclipse.jface.text.rules.Token;
import org.eclipse.jface.text.rules.WordRule;
import org.eclipse.jface.text.rules.IWordDetector;
/**
* @author fitzpata
*/
public class PredicateWordRule extends WordRule implements IPredicateRule {
/* (non-Javadoc)
* @see org.eclipse.jface.text.rules.IPredicateRule#getSuccessToken()
*/
protected IToken successToken = Token.UNDEFINED;
public void addWords(String[] tokens, IToken token)
{
for (int i = 0; i < tokens.length; i++) {
addWord(tokens[i], token);
}
}
public IToken getSuccessToken() {
return successToken;
}
/* (non-Javadoc)
* @see org.eclipse.jface.text.rules.IPredicateRule#evaluate(org.eclipse.jface.text.rules.ICharacterScanner, boolean)
	 */<|fim▁hole|>	successToken = this.evaluate(scanner);//true);
return successToken;
}
/**
* Creates a rule which, with the help of an word detector, will return the token
* associated with the detected word. If no token has been associated, the scanner
* will be rolled back and an undefined token will be returned in order to allow
* any subsequent rules to analyze the characters.
*
* @param detector the word detector to be used by this rule, may not be <code>null</code>
*
* @see #addWord
*/
public PredicateWordRule(IWordDetector detector) {
super(detector);
}
/**
* Creates a rule which, with the help of an word detector, will return the token
* associated with the detected word. If no token has been associated, the
* specified default token will be returned.
*
* @param detector the word detector to be used by this rule, may not be <code>null</code>
* @param defaultToken the default token to be returned on success
* if nothing else is specified, may not be <code>null</code>
*
* @see #addWord
*/
public PredicateWordRule(IWordDetector detector, IToken defaultToken) {
super(detector, defaultToken);
}
public PredicateWordRule(IWordDetector detector, String tokenString, IToken tokenType) {
super(detector);
this.addWord(tokenString, tokenType);
}
public PredicateWordRule(IWordDetector detector, String[] tokens, IToken tokenType) {
super(detector);
this.addWords(tokens, tokenType);
}
public PredicateWordRule(IWordDetector detector, IToken defaultToken, String[] tokens, IToken tokenType) {
super(detector, defaultToken);
this.addWords(tokens, tokenType);
}
}<|fim▁end|> | public IToken evaluate(ICharacterScanner scanner, boolean resume) { |
<|file_name|>messages.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint
@frappe.whitelist()
def get_list(arg=None):
"""get list of messages"""
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
# set all messages as read
frappe.db.begin()
frappe.db.sql("""UPDATE `tabCommunication` set seen = 1
where
communication_type in ('Chat', 'Notification')
and reference_doctype = 'User'
and reference_name = %s""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.local.flags.commit = True
if frappe.form_dict['contact'] == frappe.session['user']:
# return messages
return frappe.db.sql("""select * from `tabCommunication`
where
communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and (owner=%(contact)s
or reference_name=%(user)s
or owner=reference_name)
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabCommunication`
where<|fim▁hole|> communication_type in ('Chat', 'Notification')
and reference_doctype ='User'
and ((owner=%(contact)s and reference_name=%(user)s)
or (owner=%(contact)s and reference_name=%(contact)s))
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
# make sure current user is at the top, using has_session = 100
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
# in case of administrator
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
"""post message"""
d = frappe.new_doc('Communication')
d.communication_type = 'Notification' if parenttype else 'Chat'
d.subject = subject
d.content = txt
d.reference_doctype = 'User'
d.reference_name = contact
d.sender = frappe.session.user
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Communication", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
frappe.sendmail(\
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass<|fim▁end|> | |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// @adjivas - github.com/adjivas. See the LICENSE<|fim▁hole|>// This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_use]
extern crate clap;
extern crate pasteur;
/// Default* const arguments defined by CLI.
const DEFAULT_TEMPLATE: &'static str = "etc/templates";
const DEFAULT_LOCALE: &'static str = "etc/locales";
const DEFAULT_STYLE: &'static str = "etc/stylesheets";
const DEFAULT_CERT: &'static str = "etc/ca/cert.pem";
const DEFAULT_KEY: &'static str = "etc/ca/key.pem";
const DEFAULT_PROTOCOL: &'static str = "https";
const DEFAULT_ADDRESS: &'static str = "localhost";
const DEFAULT_SOCKET: &'static str = "3000";
/// The `main` function parses the arguments and
/// instantiates the (http | https) server.
pub fn main () {
let yaml = load_yaml!("cli.yml");
let options = clap::App::from_yaml(yaml).get_matches();
pasteur::new (
options.value_of("template").unwrap_or(DEFAULT_TEMPLATE),
options.value_of("locale").unwrap_or(DEFAULT_LOCALE),
options.value_of("style").unwrap_or(DEFAULT_STYLE),
options.value_of("cert").unwrap_or(DEFAULT_CERT),
options.value_of("key").unwrap_or(DEFAULT_KEY),
pasteur::protocol::Protocol::from_str (
options.value_of("protocol").unwrap_or(DEFAULT_PROTOCOL)
).unwrap(),
&format!("{}:{}",
options.value_of("address").unwrap_or(DEFAULT_ADDRESS),
options.value_of("socket").unwrap_or(DEFAULT_SOCKET)
),
);
}<|fim▁end|> | // file at the top-level directory of this distribution and at
// https://github.com/adjivas/pasteur
// |
<|file_name|>package-info.java<|end_file_name|><|fim▁begin|><|fim▁hole|>@org.osgi.annotation.bundle.Export
@org.osgi.annotation.versioning.Version("2.0.0")
package bndtools.editor.model;<|fim▁end|> | |
<|file_name|>event.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::EventBinding;
use dom::bindings::codegen::Bindings::EventBinding::{EventConstants, EventMethods};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{MutNullableJS, JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use dom::eventtarget::{EventTarget, EventTargetHelpers};
use util::str::DOMString;
use std::borrow::ToOwned;
use std::cell::Cell;
use std::default::Default;
use time;
#[jstraceable]
#[derive(Copy)]
pub enum EventPhase {
None = EventConstants::NONE as int,
Capturing = EventConstants::CAPTURING_PHASE as int,
AtTarget = EventConstants::AT_TARGET as int,
Bubbling = EventConstants::BUBBLING_PHASE as int,
}
#[derive(PartialEq)]
#[jstraceable]
pub enum EventTypeId {
CustomEvent,
HTMLEvent,
KeyboardEvent,
MessageEvent,
MouseEvent,
ProgressEvent,
UIEvent,
ErrorEvent
}
#[derive(PartialEq)]
pub enum EventBubbles {
Bubbles,
DoesNotBubble
}
#[derive(PartialEq)]
pub enum EventCancelable {
Cancelable,
NotCancelable
}
#[dom_struct]
pub struct Event {
reflector_: Reflector,
type_id: EventTypeId,
current_target: MutNullableJS<EventTarget>,
target: MutNullableJS<EventTarget>,
type_: DOMRefCell<DOMString>,
phase: Cell<EventPhase>,
canceled: Cell<bool>,
stop_propagation: Cell<bool>,
stop_immediate: Cell<bool>,
cancelable: Cell<bool>,
bubbles: Cell<bool>,
trusted: Cell<bool>,
dispatching: Cell<bool>,
initialized: Cell<bool>,
timestamp: u64,
}
impl Event {
pub fn new_inherited(type_id: EventTypeId) -> Event {
Event {
reflector_: Reflector::new(),
type_id: type_id,
current_target: Default::default(),
target: Default::default(),
phase: Cell::new(EventPhase::None),
type_: DOMRefCell::new("".to_owned()),
canceled: Cell::new(false),
cancelable: Cell::new(false),
bubbles: Cell::new(false),
trusted: Cell::new(false),
dispatching: Cell::new(false),
stop_propagation: Cell::new(false),
stop_immediate: Cell::new(false),
initialized: Cell::new(false),
timestamp: time::get_time().sec as u64,
}
}
pub fn new_uninitialized(global: GlobalRef) -> Temporary<Event> {
reflect_dom_object(box Event::new_inherited(EventTypeId::HTMLEvent),
global,
EventBinding::Wrap)
}
pub fn new(global: GlobalRef,
type_: DOMString,
bubbles: EventBubbles,
cancelable: EventCancelable) -> Temporary<Event> {
let event = Event::new_uninitialized(global).root();
event.r().InitEvent(type_, bubbles == EventBubbles::Bubbles, cancelable == EventCancelable::Cancelable);
Temporary::from_rooted(event.r())
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &EventBinding::EventInit) -> Fallible<Temporary<Event>> {
let bubbles = if init.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.cancelable { EventCancelable::Cancelable } else { EventCancelable::NotCancelable };
Ok(Event::new(global, type_, bubbles, cancelable))
}
#[inline]
pub fn type_id<'a>(&'a self) -> &'a EventTypeId {
&self.type_id
}
#[inline]
pub fn clear_current_target(&self) {
self.current_target.clear();
}
#[inline]
pub fn set_current_target(&self, val: JSRef<EventTarget>) {
self.current_target.assign(Some(val));
}
#[inline]
pub fn set_target(&self, val: JSRef<EventTarget>) {
self.target.assign(Some(val));
}
#[inline]<|fim▁hole|> pub fn set_phase(&self, val: EventPhase) {
self.phase.set(val)
}
#[inline]
pub fn stop_propagation(&self) -> bool {
self.stop_propagation.get()
}
#[inline]
pub fn stop_immediate(&self) -> bool {
self.stop_immediate.get()
}
#[inline]
pub fn bubbles(&self) -> bool {
self.bubbles.get()
}
#[inline]
pub fn dispatching(&self) -> bool {
self.dispatching.get()
}
#[inline]
pub fn set_dispatching(&self, val: bool) {
self.dispatching.set(val)
}
#[inline]
pub fn initialized(&self) -> bool {
self.initialized.get()
}
}
impl<'a> EventMethods for JSRef<'a, Event> {
fn EventPhase(self) -> u16 {
self.phase.get() as u16
}
fn Type(self) -> DOMString {
self.type_.borrow().clone()
}
fn GetTarget(self) -> Option<Temporary<EventTarget>> {
self.target.get()
}
fn GetCurrentTarget(self) -> Option<Temporary<EventTarget>> {
self.current_target.get()
}
fn DefaultPrevented(self) -> bool {
self.canceled.get()
}
fn PreventDefault(self) {
if self.cancelable.get() {
self.canceled.set(true)
}
}
fn StopPropagation(self) {
self.stop_propagation.set(true);
}
fn StopImmediatePropagation(self) {
self.stop_immediate.set(true);
self.stop_propagation.set(true);
}
fn Bubbles(self) -> bool {
self.bubbles.get()
}
fn Cancelable(self) -> bool {
self.cancelable.get()
}
fn TimeStamp(self) -> u64 {
self.timestamp
}
fn InitEvent(self,
type_: DOMString,
bubbles: bool,
cancelable: bool) {
if self.dispatching.get() {
return;
}
self.initialized.set(true);
self.stop_propagation.set(false);
self.stop_immediate.set(false);
self.canceled.set(false);
self.trusted.set(false);
self.target.clear();
*self.type_.borrow_mut() = type_;
self.bubbles.set(bubbles);
self.cancelable.set(cancelable);
}
fn IsTrusted(self) -> bool {
self.trusted.get()
}
}
pub trait EventHelpers {
fn set_trusted(self, trusted: bool);
fn fire(self, target: JSRef<EventTarget>);
}
impl<'a> EventHelpers for JSRef<'a, Event> {
fn set_trusted(self, trusted: bool) {
self.trusted.set(trusted);
}
// https://html.spec.whatwg.org/multipage/webappapis.html#fire-a-simple-event
fn fire(self, target: JSRef<EventTarget>) {
self.set_trusted(true);
target.dispatch_event(self);
}
}<|fim▁end|> | |
<|file_name|>pipeline_builder.cpp<|end_file_name|><|fim▁begin|>#include "pipeline_builder.hpp"
#include "pipeline_state.hpp"
#include "shader.hpp"
#include "shader_signature.hpp"
#include "constants.hpp"
#include "../Common/texture_formats_common.hpp"
#include <set>
#include <algorithm>
namespace AgpuGL
{
void processTextureWithSamplerCombinations(const std::set<TextureWithSamplerCombination> &rawTextureSamplerCombinations, const agpu::shader_signature_ref &shaderSignature, TextureWithSamplerCombinationMap &map, std::vector<MappedTextureWithSamplerCombination> &usedCombinations)
{
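    // Resolves every (texture binding, sampler binding) pair used by the shaders to a concrete GL
    // texture unit via the shader signature: pairs that already share a unit keep it ("natural"
    // combinations), the remaining pairs are appended to extra texture units after them.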
// Split between natural pairs, and non-natural pairs.
std::vector<MappedTextureWithSamplerCombination> naturalTextureWithSamplerCombinations;
std::vector<MappedTextureWithSamplerCombination> nonNaturalTextureWithSamplerCombinations;
auto glShaderSignature = shaderSignature.as<GLShaderSignature> ();
for (auto & combination : rawTextureSamplerCombinations)
{
int textureUnit = glShaderSignature->mapDescriptorSetAndBinding(AGPU_SHADER_BINDING_TYPE_SAMPLED_IMAGE, combination.textureDescriptorSet, combination.textureDescriptorBinding);
if (textureUnit < 0)
return;
int sampler = glShaderSignature->mapDescriptorSetAndBinding(AGPU_SHADER_BINDING_TYPE_SAMPLER, combination.samplerDescriptorSet, combination.samplerDescriptorBinding);
if (sampler < 0)
return;
MappedTextureWithSamplerCombination mappedCombination;
mappedCombination.combination = combination;
mappedCombination.name = combination.createName();
mappedCombination.sourceTextureUnit = textureUnit;
mappedCombination.sourceSamplerUnit = sampler;
if (textureUnit == sampler)
naturalTextureWithSamplerCombinations.push_back(mappedCombination);
else
nonNaturalTextureWithSamplerCombinations.push_back(mappedCombination);
}
auto naturalTextureUnitCount = glShaderSignature->bindingPointsUsed[(int)OpenGLResourceBindingType::Sampler];
// Assign the natural pairs
usedCombinations.reserve(naturalTextureWithSamplerCombinations.size() + nonNaturalTextureWithSamplerCombinations.size());
for (auto &combination : naturalTextureWithSamplerCombinations)
{
combination.mappedTextureUnit = combination.mappedSamplerUnit = combination.sourceSamplerUnit;
usedCombinations.push_back(combination);
}
// Assign the non-natural pairs
auto nextTextureUnit = naturalTextureUnitCount;
for (auto &combination : nonNaturalTextureWithSamplerCombinations)
{
combination.mappedTextureUnit = nextTextureUnit++;
combination.mappedSamplerUnit = combination.sourceSamplerUnit;
usedCombinations.push_back(combination);
}
for (auto &combination : usedCombinations)
{
map.insert(std::make_pair(combination.combination, combination));
}
}
GLGraphicsPipelineBuilder::GLGraphicsPipelineBuilder()
{
// Depth buffer
depthEnabled = false;
depthWriteMask = true;
depthFunction = AGPU_LESS;
// Depth biasing
depthBiasEnabled = false;
depthBiasConstantFactor = 0;
depthBiasClamp = 0;
depthBiasSlopeFactor = 0;
// Face culling
frontFaceWinding = AGPU_COUNTER_CLOCKWISE;
cullingMode = AGPU_CULL_MODE_NONE;
    // Polygons
polygonMode = AGPU_POLYGON_MODE_FILL;
// Color buffer
blendingEnabled = false;
redMask = true;
greenMask = true;
blueMask = true;
alphaMask = true;
sourceBlendFactor = AGPU_BLENDING_ONE;
destBlendFactor = AGPU_BLENDING_ZERO;
blendOperation = AGPU_BLENDING_OPERATION_ADD;
sourceBlendFactorAlpha = AGPU_BLENDING_ONE;
destBlendFactorAlpha = AGPU_BLENDING_ZERO;
blendOperationAlpha = AGPU_BLENDING_OPERATION_ADD;
// Stencil buffer
stencilEnabled = false;
stencilWriteMask = ~0;
stencilReadMask = ~0;
stencilFrontFailOp = AGPU_KEEP;
stencilFrontDepthFailOp = AGPU_KEEP;
stencilFrontDepthPassOp = AGPU_KEEP;
stencilFrontFunc = AGPU_ALWAYS;
stencilBackFailOp = AGPU_KEEP;
stencilBackDepthFailOp = AGPU_KEEP;
stencilBackDepthPassOp = AGPU_KEEP;
stencilBackFunc = AGPU_ALWAYS;
// Render targets
renderTargetFormats.resize(1, AGPU_TEXTURE_FORMAT_B8G8R8A8_UNORM);
depthStencilFormat = AGPU_TEXTURE_FORMAT_D24_UNORM_S8_UINT;
primitiveType = AGPU_POINTS;
}
GLGraphicsPipelineBuilder::~GLGraphicsPipelineBuilder()
{
}
agpu::pipeline_builder_ref GLGraphicsPipelineBuilder::createBuilder(const agpu::device_ref &device)
{
auto result = agpu::makeObject<GLGraphicsPipelineBuilder> ();
auto builder = result.as<GLGraphicsPipelineBuilder> ();
builder->device = device;
builder->reset();
return result;
}
agpu_error GLGraphicsPipelineBuilder::reset()
{
shaders.clear();
errorMessages.clear();
return AGPU_OK;
}
void GLGraphicsPipelineBuilder::buildTextureWithSampleCombinationMapInto(TextureWithSamplerCombinationMap &map, std::vector<MappedTextureWithSamplerCombination> &usedCombinations)
{
std::set<TextureWithSamplerCombination> rawTextureSamplerCombinations;
// Get all of the combinations.
for(auto &shaderWithEntryPoint : shaders)
{
auto shader = shaderWithEntryPoint.first;
if(!shader)
continue;
for (auto combination : shader.as<GLShader>()->getTextureWithSamplerCombination(shaderWithEntryPoint.second))
rawTextureSamplerCombinations.insert(combination);
}
processTextureWithSamplerCombinations(rawTextureSamplerCombinations, shaderSignature, map, usedCombinations);
}
agpu::pipeline_state_ptr GLGraphicsPipelineBuilder::build ()
{
GLuint program = 0;
GLint baseInstanceUniformIndex = -1;
bool succeded = true;
std::vector<GLShaderForSignatureRef> shaderInstances;
std::vector<MappedTextureWithSamplerCombination> mappedTextureWithSamplerCombinations;
TextureWithSamplerCombinationMap textureWithSamplerCombinationMap;
if(!shaders.empty())
{
buildTextureWithSampleCombinationMapInto(textureWithSamplerCombinationMap, mappedTextureWithSamplerCombinations);
// Instantiate the shaders
for(auto &shaderWithEntryPoint : shaders)
{
const auto &shader = shaderWithEntryPoint.first;
if(!shaderSignature)
{
errorMessages += "Missing shader signature.";
succeded = false;
break;
}
GLShaderForSignatureRef shaderForSignature;
std::string errorMessage;
// Create the shader instance.
auto error = shader.as<GLShader> ()->instanceForSignature(shaderSignature, textureWithSamplerCombinationMap, shaderWithEntryPoint.second, &shaderForSignature, &errorMessage);
errorMessages += errorMessage;
if(error != AGPU_OK)
{
printError("Instance error: %d:%s\n", error, errorMessage.c_str());
succeded = false;
break;
}
shaderInstances.push_back(shaderForSignature);
}
if(!succeded)
return nullptr;
succeded = false;
deviceForGL->onMainContextBlocking([&]{
// Create the progrma
program = deviceForGL->glCreateProgram();
// Attach the shaders.
for(auto shaderInstance : shaderInstances)
{
// Attach the shader instance to the program.
std::string errorMessage;
auto error = shaderInstance->attachToProgram(program, &errorMessage);
errorMessages += errorMessage;
if(error != AGPU_OK)
return;
}
// Link the program.
deviceForGL->glLinkProgram(program);
// Check the link status
GLint status;
deviceForGL->glGetProgramiv(program, GL_LINK_STATUS, &status);
if(status != GL_TRUE)
{
// TODO: Get the info log
return;
}
// Get some special uniforms
baseInstanceUniformIndex = deviceForGL->glGetUniformLocation(program, "SPIRV_Cross_BaseInstance");
succeded = true;
});
}
if(!succeded)
return nullptr;
// Create the pipeline state object
auto result = agpu::makeObject<GLPipelineState> ();
auto pipeline = result.as<GLPipelineState> ();
pipeline->device = device;
pipeline->programHandle = program;
pipeline->type = AgpuPipelineStateType::Graphics;
pipeline->shaderSignature = shaderSignature;
pipeline->shaderInstances = shaderInstances;
pipeline->mappedTextureWithSamplerCombinations = mappedTextureWithSamplerCombinations;
auto graphicsState = new AgpuGraphicsPipelineStateData();
graphicsState->device = device;
pipeline->extraStateData = graphicsState;
// Base instance
graphicsState->baseInstanceUniformIndex = baseInstanceUniformIndex;
// Depth state
graphicsState->depthEnabled = depthEnabled;
graphicsState->depthWriteMask = depthWriteMask;
graphicsState->depthFunction = mapCompareFunction(depthFunction);
// Face culling
graphicsState->frontFaceWinding = mapFaceWinding(frontFaceWinding);<|fim▁hole|> // Color buffer
graphicsState->blendingEnabled = blendingEnabled;
graphicsState->redMask = redMask;
graphicsState->greenMask = greenMask;
graphicsState->blueMask = blueMask;
graphicsState->alphaMask = alphaMask;
graphicsState->sourceBlendFactor = mapBlendFactor(sourceBlendFactor, false);
graphicsState->destBlendFactor = mapBlendFactor(destBlendFactor, false);
graphicsState->blendOperation = mapBlendOperation(blendOperation);
graphicsState->sourceBlendFactorAlpha = mapBlendFactor(sourceBlendFactorAlpha, true);
graphicsState->destBlendFactorAlpha = mapBlendFactor(destBlendFactorAlpha, true);
graphicsState->blendOperationAlpha = mapBlendOperation(blendOperationAlpha);
// Stencil testing
graphicsState->stencilEnabled = stencilEnabled;
graphicsState->stencilWriteMask = stencilWriteMask;
graphicsState->stencilReadMask = stencilReadMask;
graphicsState->stencilFrontFailOp = mapStencilOperation(stencilFrontFailOp);
graphicsState->stencilFrontDepthFailOp = mapStencilOperation(stencilFrontDepthFailOp);
graphicsState->stencilFrontDepthPassOp = mapStencilOperation(stencilFrontDepthPassOp);
graphicsState->stencilFrontFunc = mapCompareFunction(stencilFrontFunc);
graphicsState->stencilBackFailOp = mapStencilOperation(stencilBackFailOp);
graphicsState->stencilBackDepthFailOp = mapStencilOperation(stencilBackDepthFailOp);
graphicsState->stencilBackDepthPassOp = mapStencilOperation(stencilBackDepthPassOp);
graphicsState->stencilBackFunc = mapCompareFunction(stencilBackFunc);
// Multisampling
graphicsState->sampleCount = sampleCount;
graphicsState->sampleQuality = sampleQuality;
// Miscellaneous
graphicsState->primitiveTopology = primitiveType;
graphicsState->renderTargetCount = (int)renderTargetFormats.size();
graphicsState->hasSRGBTarget = false;
for (auto format : renderTargetFormats)
{
if(isSRGBTextureFormat(format))
{
graphicsState->hasSRGBTarget = true;
break;
}
}
return result.disown();
}
agpu_error GLGraphicsPipelineBuilder::attachShader(const agpu::shader_ref &shader )
{
CHECK_POINTER(shader);
return attachShaderWithEntryPoint(shader, shader.as<GLShader> ()->type, "main");
}
agpu_error GLGraphicsPipelineBuilder::attachShaderWithEntryPoint(const agpu::shader_ref &shader, agpu_shader_type type, agpu_cstring entry_point )
{
CHECK_POINTER(shader);
shaders.push_back(std::make_pair(shader, entry_point));
return AGPU_OK;
}
agpu_size GLGraphicsPipelineBuilder::getBuildingLogLength ( )
{
return (agpu_size)errorMessages.size();
}
agpu_error GLGraphicsPipelineBuilder::getBuildingLog ( agpu_size buffer_size, agpu_string_buffer buffer )
{
if(buffer_size == 0)
return AGPU_OK;
size_t toCopy = std::min(size_t(buffer_size - 1), errorMessages.size());
if(toCopy > 0)
memcpy(buffer, errorMessages.data(), toCopy);
buffer[buffer_size-1] = 0;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setShaderSignature(const agpu::shader_signature_ref &signature)
{
CHECK_POINTER(signature);
shaderSignature = signature;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setBlendState(agpu_int renderTargetMask, agpu_bool enabled)
{
this->blendingEnabled = enabled;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setBlendFunction(agpu_int renderTargetMask, agpu_blending_factor sourceFactor, agpu_blending_factor destFactor, agpu_blending_operation colorOperation, agpu_blending_factor sourceAlphaFactor, agpu_blending_factor destAlphaFactor, agpu_blending_operation alphaOperation)
{
this->sourceBlendFactor = sourceFactor;
this->destBlendFactor = destFactor;
this->blendOperation = colorOperation;
this->sourceBlendFactorAlpha = sourceAlphaFactor;
this->destBlendFactorAlpha = destAlphaFactor;
this->blendOperationAlpha = alphaOperation;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setColorMask(agpu_int renderTargetMask, agpu_bool redEnabled, agpu_bool greenEnabled, agpu_bool blueEnabled, agpu_bool alphaEnabled)
{
this->redMask = redEnabled;
this->greenMask = greenEnabled;
this->blueMask = blueEnabled;
this->alphaMask = alphaEnabled;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setFrontFace ( agpu_face_winding winding )
{
this->frontFaceWinding = winding;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setCullMode ( agpu_cull_mode mode )
{
this->cullingMode = mode;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setDepthBias ( agpu_float constant_factor, agpu_float clamp, agpu_float slope_factor )
{
this->depthBiasEnabled = true;
this->depthBiasConstantFactor = constant_factor;
this->depthBiasClamp = clamp;
this->depthBiasSlopeFactor = slope_factor;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setDepthState ( agpu_bool enabled, agpu_bool writeMask, agpu_compare_function function )
{
this->depthEnabled = enabled;
this->depthWriteMask = writeMask;
this->depthFunction = function;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setStencilState ( agpu_bool enabled, agpu_int writeMask, agpu_int readMask )
{
this->stencilEnabled = enabled;
this->stencilWriteMask = writeMask;
this->stencilReadMask = readMask;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setStencilFrontFace(agpu_stencil_operation stencilFailOperation, agpu_stencil_operation depthFailOperation, agpu_stencil_operation stencilDepthPassOperation, agpu_compare_function stencilFunction)
{
this->stencilFrontFailOp = stencilFailOperation;
this->stencilFrontDepthFailOp = depthFailOperation;
this->stencilFrontDepthPassOp = stencilDepthPassOperation;
this->stencilFrontFunc = stencilFunction;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setStencilBackFace(agpu_stencil_operation stencilFailOperation, agpu_stencil_operation depthFailOperation, agpu_stencil_operation stencilDepthPassOperation, agpu_compare_function stencilFunction)
{
this->stencilBackFailOp = stencilFailOperation;
this->stencilBackDepthFailOp = depthFailOperation;
this->stencilBackDepthPassOp = stencilDepthPassOperation;
this->stencilBackFunc = stencilFunction;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setRenderTargetCount(agpu_int count)
{
renderTargetFormats.resize(count, AGPU_TEXTURE_FORMAT_B8G8R8A8_UNORM);
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setRenderTargetFormat(agpu_uint index, agpu_texture_format format)
{
if (index >= renderTargetFormats.size())
return AGPU_INVALID_PARAMETER;
renderTargetFormats[index] = format;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setDepthStencilFormat(agpu_texture_format format)
{
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setPolygonMode(agpu_polygon_mode mode)
{
this->polygonMode = mode;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setPrimitiveType(agpu_primitive_topology type)
{
this->primitiveType = type;
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setVertexLayout(const agpu::vertex_layout_ref &layout)
{
return AGPU_OK;
}
agpu_error GLGraphicsPipelineBuilder::setSampleDescription(agpu_uint sample_count, agpu_uint sample_quality)
{
this->sampleCount = sample_count;
this->sampleQuality = sample_quality;
return AGPU_OK;
}
} // End of namespace AgpuGL<|fim▁end|> | graphicsState->cullingMode = mapCullingMode(cullingMode);
|
<|file_name|>IP_SSATaucTaoTikhonovProblemLCL.cc<|end_file_name|><|fim▁begin|>// Copyright (C) 2012, 2014, 2015, 2016, 2017 David Maxwell and Constantine Khroulev
//
// This file is part of PISM.
//
// PISM is free software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation; either version 3 of the License, or (at your option) any later
// version.
//
// PISM is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License
// along with PISM; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "IP_SSATaucTaoTikhonovProblemLCL.hh"
#include "pism/util/IceGrid.hh"
#include "pism/util/ConfigInterface.hh"
namespace pism {
namespace inverse {
typedef IceModelVec2S DesignVec;
typedef IceModelVec2V StateVec;
// typedef TikhonovProblemListener<InverseProblem> Listener;
// typedef typename Listener::Ptr ListenerPtr;
IP_SSATaucTaoTikhonovProblemLCL::IP_SSATaucTaoTikhonovProblemLCL(IP_SSATaucForwardProblem &ssaforward,
IP_SSATaucTaoTikhonovProblemLCL::DesignVec &d0,
IP_SSATaucTaoTikhonovProblemLCL::StateVec &u_obs,
double eta,
IPFunctional<DesignVec> &designFunctional,
IPFunctional<StateVec> &stateFunctional)
: m_ssaforward(ssaforward), m_d0(d0), m_u_obs(u_obs), m_eta(eta),
m_designFunctional(designFunctional), m_stateFunctional(stateFunctional) {
PetscErrorCode ierr;
IceGrid::ConstPtr grid = m_d0.get_grid();
double stressScale = grid->ctx()->config()->get_double("inverse.design.param_tauc_scale");
m_constraintsScale = grid->Lx()*grid->Ly()*4*stressScale;
m_velocityScale = grid->ctx()->config()->get_double("inverse.ssa.velocity_scale", "m second-1");
int design_stencil_width = m_d0.get_stencil_width();
int state_stencil_width = m_u_obs.get_stencil_width();
m_d.reset(new DesignVec(grid, "design variable", WITH_GHOSTS, design_stencil_width));
m_d_Jdesign.create(grid, "Jdesign design variable", WITH_GHOSTS, design_stencil_width);
m_dGlobal.create(grid, "design variable (global)", WITHOUT_GHOSTS, design_stencil_width);
m_dGlobal.copy_from(m_d0);
m_uGlobal.reset(new StateVec(grid, "state variable (global)",
WITHOUT_GHOSTS, state_stencil_width));
m_u.create(grid, "state variable", WITH_GHOSTS, state_stencil_width);
m_du.create(grid, "du", WITH_GHOSTS, state_stencil_width);
m_u_Jdesign.create(grid, "Jdesign state variable", WITH_GHOSTS, state_stencil_width);
m_u_diff.reset(new StateVec(grid, "state residual", WITH_GHOSTS, state_stencil_width));
m_d_diff.reset(new DesignVec(grid, "design residual", WITH_GHOSTS, design_stencil_width));
m_dzeta.create(grid,"dzeta",WITH_GHOSTS,design_stencil_width);
m_grad_state.reset(new StateVec(grid, "state gradient", WITHOUT_GHOSTS, state_stencil_width));
m_grad_design.reset(new DesignVec(grid, "design gradient", WITHOUT_GHOSTS, design_stencil_width));
m_constraints.reset(new StateVec(grid,"PDE constraints",WITHOUT_GHOSTS,design_stencil_width));
DM da;
m_ssaforward.get_da(&da);
ierr = DMSetMatType(da, MATBAIJ);
PISM_CHK(ierr, "DMSetMatType");
ierr = DMCreateMatrix(da, m_Jstate.rawptr());
PISM_CHK(ierr, "DMCreateMatrix");
int nLocalNodes = grid->xm()*grid->ym();
int nGlobalNodes = grid->Mx()*grid->My();
ierr = MatCreateShell(grid->com, 2*nLocalNodes, nLocalNodes, 2*nGlobalNodes, nGlobalNodes,
this, m_Jdesign.rawptr());
PISM_CHK(ierr, "MatCreateShell");
ierr = MatShellSetOperation(m_Jdesign, MATOP_MULT,
(void(*)(void))jacobian_design_callback);
PISM_CHK(ierr, "MatShellSetOperation");
ierr = MatShellSetOperation(m_Jdesign, MATOP_MULT_TRANSPOSE,
(void(*)(void))jacobian_design_transpose_callback);
PISM_CHK(ierr, "MatShellSetOperation");
m_x.reset(new IPTwoBlockVec(m_dGlobal.get_vec(),m_uGlobal->get_vec()));
}
IP_SSATaucTaoTikhonovProblemLCL::~IP_SSATaucTaoTikhonovProblemLCL()
{
// empty
}
void IP_SSATaucTaoTikhonovProblemLCL::setInitialGuess(DesignVec &d0) {
m_dGlobal.copy_from(d0);
}
IP_SSATaucTaoTikhonovProblemLCL::StateVec::Ptr IP_SSATaucTaoTikhonovProblemLCL::stateSolution() {
m_x->scatterToB(m_uGlobal->get_vec());
m_uGlobal->scale(m_velocityScale);
return m_uGlobal;<|fim▁hole|> m_x->scatterToA(m_d->get_vec()); //CHKERRQ(ierr);
return m_d;
}
void IP_SSATaucTaoTikhonovProblemLCL::connect(Tao tao) {
PetscErrorCode ierr;
ierr = TaoSetStateDesignIS(tao,
m_x->blockBIndexSet() /*state*/,
m_x->blockAIndexSet() /*design*/);
PISM_CHK(ierr, "TaoSetStateDesignIS");
taoutil::TaoObjGradCallback<IP_SSATaucTaoTikhonovProblemLCL,
&IP_SSATaucTaoTikhonovProblemLCL::evaluateObjectiveAndGradient>::connect(tao, *this);
taoutil::TaoLCLCallbacks<IP_SSATaucTaoTikhonovProblemLCL>::connect(tao, *this,
m_constraints->get_vec(),
m_Jstate, m_Jdesign);
taoutil::TaoMonitorCallback<IP_SSATaucTaoTikhonovProblemLCL>::connect(tao,*this);
}
void IP_SSATaucTaoTikhonovProblemLCL::monitorTao(Tao tao) {
PetscErrorCode ierr;
// Has to be a PetscInt because of the TaoGetSolutionStatus call.
PetscInt its;
ierr = TaoGetSolutionStatus(tao, &its, NULL, NULL, NULL, NULL, NULL);
PISM_CHK(ierr, "TaoGetSolutionStatus");
int nListeners = m_listeners.size();
for (int k = 0; k < nListeners; k++) {
m_listeners[k]->iteration(*this, m_eta,
its, m_val_design, m_val_state,
m_d, m_d_diff, m_grad_design,
m_ssaforward.solution(),
m_u_diff,
m_grad_state,
m_constraints);
}
}
void IP_SSATaucTaoTikhonovProblemLCL::evaluateObjectiveAndGradient(Tao /*tao*/, Vec x,
double *value, Vec gradient) {
m_x->scatter(x,m_dGlobal.get_vec(),m_uGlobal->get_vec());
m_uGlobal->scale(m_velocityScale);
// Variable 'm_dGlobal' has no ghosts. We need ghosts for computation with the design variable.
m_d->copy_from(m_dGlobal);
m_d_diff->copy_from(*m_d);
m_d_diff->add(-1,m_d0);
m_designFunctional.gradientAt(*m_d_diff, *m_grad_design);
m_grad_design->scale(1/m_eta);
m_u_diff->copy_from(*m_uGlobal);
m_u_diff->add(-1, m_u_obs);
m_stateFunctional.gradientAt(*m_u_diff, *m_grad_state);
m_grad_state->scale(m_velocityScale);
m_x->gather(m_grad_design->get_vec(), m_grad_state->get_vec(), gradient);
m_designFunctional.valueAt(*m_d_diff, &m_val_design);
m_stateFunctional.valueAt(*m_u_diff, &m_val_state);
*value = m_val_design / m_eta + m_val_state;
}
TerminationReason::Ptr IP_SSATaucTaoTikhonovProblemLCL::formInitialGuess(Vec *x) {
m_d->copy_from(m_dGlobal);
TerminationReason::Ptr reason = m_ssaforward.linearize_at(*m_d);
if (reason->failed()) {
return reason;
}
m_uGlobal->copy_from(*m_ssaforward.solution());
m_uGlobal->scale(1.0 / m_velocityScale);
m_x->gather(m_dGlobal.get_vec(), m_uGlobal->get_vec());
// This is probably irrelevant.
m_uGlobal->scale(m_velocityScale);
*x = *m_x;
return GenericTerminationReason::success();
}
void IP_SSATaucTaoTikhonovProblemLCL::evaluateConstraints(Tao, Vec x, Vec r) {
PetscErrorCode ierr;
m_x->scatter(x,m_dGlobal.get_vec(),m_uGlobal->get_vec());
m_uGlobal->scale(m_velocityScale);
m_d->copy_from(m_dGlobal);
m_u.copy_from(*m_uGlobal);
m_ssaforward.set_design(*m_d);
m_ssaforward.assemble_residual(m_u, r);
ierr = VecScale(r,1./m_constraintsScale);
PISM_CHK(ierr, "VecScale");
}
void IP_SSATaucTaoTikhonovProblemLCL::evaluateConstraintsJacobianState(Tao, Vec x,
Mat Jstate,
Mat /*Jpc*/,
Mat /*Jinv*/,
MatStructure *s) {
PetscErrorCode ierr;
m_x->scatter(x, m_dGlobal.get_vec(), m_uGlobal->get_vec());
m_uGlobal->scale(m_velocityScale);
m_d->copy_from(m_dGlobal);
m_u.copy_from(*m_uGlobal);
m_ssaforward.set_design(*m_d);
m_ssaforward.assemble_jacobian_state(m_u, Jstate);
*s = SAME_NONZERO_PATTERN;
ierr = MatScale(Jstate, m_velocityScale / m_constraintsScale);
PISM_CHK(ierr, "MatScale");
}
void IP_SSATaucTaoTikhonovProblemLCL::evaluateConstraintsJacobianDesign(Tao, Vec x, Mat /*Jdesign*/) {
// I'm not sure if the following are necessary (i.e. will the copies that happen
// in evaluateObjectiveAndGradient be sufficient) but we'll do them here
// just in case.
m_x->scatter(x,m_dGlobal.get_vec(),m_uGlobal->get_vec());
m_uGlobal->scale(m_velocityScale);
m_d_Jdesign.copy_from(m_dGlobal);
m_u_Jdesign.copy_from(*m_uGlobal);
}
void IP_SSATaucTaoTikhonovProblemLCL::applyConstraintsJacobianDesign(Vec x, Vec y) {
m_dzeta.copy_from_vec(x);
m_ssaforward.set_design(m_d_Jdesign);
m_ssaforward.apply_jacobian_design(m_u_Jdesign, m_dzeta, y);
PetscErrorCode ierr = VecScale(y,1./m_constraintsScale);
PISM_CHK(ierr, "VecScale");
}
void IP_SSATaucTaoTikhonovProblemLCL::applyConstraintsJacobianDesignTranspose(Vec x, Vec y) {
m_du.copy_from_vec(x);
m_ssaforward.set_design(m_d_Jdesign);
m_ssaforward.apply_jacobian_design_transpose(m_u_Jdesign, m_du, y);
PetscErrorCode ierr = VecScale(y, 1.0 / m_constraintsScale);
PISM_CHK(ierr, "VecScale");
}
PetscErrorCode IP_SSATaucTaoTikhonovProblemLCL::jacobian_design_callback(Mat A, Vec x, Vec y) {
try {
IP_SSATaucTaoTikhonovProblemLCL *ctx;
PetscErrorCode ierr = MatShellGetContext(A,&ctx);
PISM_CHK(ierr, "MatShellGetContext");
ctx->applyConstraintsJacobianDesign(x,y);
} catch (...) {
MPI_Comm com = MPI_COMM_SELF;
PetscErrorCode ierr = PetscObjectGetComm((PetscObject)A, &com); CHKERRQ(ierr);
handle_fatal_errors(com);
SETERRQ(com, 1, "A PISM callback failed");
}
return 0;
}
PetscErrorCode IP_SSATaucTaoTikhonovProblemLCL::jacobian_design_transpose_callback(Mat A, Vec x,
Vec y) {
try {
IP_SSATaucTaoTikhonovProblemLCL *ctx;
PetscErrorCode ierr = MatShellGetContext(A,&ctx);
PISM_CHK(ierr, "MatShellGetContext");
ctx->applyConstraintsJacobianDesignTranspose(x,y);
} catch (...) {
MPI_Comm com = MPI_COMM_SELF;
PetscErrorCode ierr = PetscObjectGetComm((PetscObject)A, &com); CHKERRQ(ierr);
handle_fatal_errors(com);
SETERRQ(com, 1, "A PISM callback failed");
}
return 0;
}
} // end of namespace inverse
} // end of namespace pism<|fim▁end|> | }
IP_SSATaucTaoTikhonovProblemLCL::DesignVec::Ptr IP_SSATaucTaoTikhonovProblemLCL::designSolution() { |
<|file_name|>lhpFunctions.py<|end_file_name|><|fim▁begin|>import operator
def pozicijaSprite(broj, x_velicina):
	#returns the pixel at which the sprite is located
pixel = broj * (x_velicina + 1) #1 je prazan red izmedu spritova
return(pixel)
#spriteSlova = ["A", "B", "C", "D", "E", "F", "G", "H", "i", "s", "e"]
spriteSlova = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "s", ",", "'", "1", "2", "4", "8", "6", "3", ".", "5", "7", "9", "0", "M", "B", "I", "N", "S", "E", "R", "T", " ", "-", "V","U" ,"A", "L", "O", "D", ":", "m", "j", "n", "u", "C", "H", "k", "l", "o", "p", "r", "t", "v", "z", "K", "P", "%", "/"]
def pixel2Ton(pixel):
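	#maps a y pixel coordinate back to a tone index; inverse of ton2Pixel below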
rezolucija = 90
indent = -12 #extra pixeli
height = 3
broj = ( rezolucija - pixel - indent ) / height
return(int(broj))
predikati = {
0 : 0,
1 : -1,
2 : 1,
3 : 0
}
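#kljucevi maps a tone index to a (note letter, LilyPond octave mark) pair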
kljucevi = {
0 : ("d", ",,"),
1 : ("e", ",,"),
2 : ("f", ",,"),
3 : ("g", ",,"),
4 : ("a", ",,"),
5 : ("h", ",,"),
6 : ("c", ","),
7 : ("d", ","),
8 : ("e", ","),
9 : ("f", ","),
10 : ("g", ","),
11 : ("a", ","),
12 : ("h", ","),
13 : ("c", ""),
14 : ("d", ""),
15 : ("e", ""),
16 : ("f", ""),
17 : ("g", ""),
18 : ("a", ""),
19 : ("h", ""),
20 : ("c", "'"),
21 : ("d", "'"),
22 : ("e", "'"),
23 : ("f", "'"),
24 : ("g", "'"),
25 : ("a", "'"),
26 : ("h", "'"),
27 : ("c", "''"),
28 : ("d", "''"),
29 : ("e", "''"),
30 : ("f", "''"),
31 : ("g", "''"),
32 : ("a", "''"),
33 : ("h", "''"),
34 : ("c", "'''"),
35 : ("d", "'''"),
36 : ("e", "'''"),
37 : ("f", "'''"),
38 : ("g", "'''"),
39 : ("a", "'''"),
40 : ("h", "'''")
}
def removeLily(slovo):
return(slovo.replace(',', '').replace('\'', '').upper())
def slovoPozicija(slovo):<|fim▁hole|>
rijecnikNotnihVrijednosti = {
0 : "16",
1 : "8",
2 : "8.",
3 : "4",
4 : "416",
5 : "4.",
6 : "4.16",
7 : "2",
8 : "216",
9 : "28",
10 : "28.",
11 : "2.",
12 : "2.16",
13 : "2.8",
14 : "2.8.",
15 : "1"
}
def pixel2Pozicija(pixel):
rezolucija = 90
indent = 19 #extra pixeli
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def pixel2Trajanje(pixel):
indent = 4
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def ton2Pixel(ton):
rezolucija = 90
indent = -12
height = 3
pixel = rezolucija - indent - ( ton * height )
return(pixel)
def pozicija2Pixel(pozicija):
rezolucija = 90
indent = 19 #extra pixeli
width = 6
pixel = pozicija * width + indent
return(pixel)
def trajanje2Pixel(trajanje):
indent = 4
width = 6
pixel = trajanje * width + indent
return(pixel)
class dodaj_notu(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_chord(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_markup(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class cursor(object):
def __init__(self, pozicija, ton, trajanje):
self.pozicija = pozicija
self.ton = ton
self.trajanje = trajanje
self.sprite = 0
self.bg_scroll_x = 0
self.bg_scroll_y = 0
self.bg_scroll_x_offset = 0 #used for cursor follow efect
self.bg_scroll_y_offset = 0 #used for cursor follow efect
self.apsolute_x = 0 #used for cursor follow efect
self.apsolute_y = 0 #used for cursor follow efect
def checkXColision(nota, cursorLeft, trajanje):
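	#returns True when the note span [pozicija, pozicija + trajanje] overlaps the cursor span [cursorLeft, cursorLeft + trajanje]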
if ( nota.pozicija == cursorLeft):
print("kolizija na pocetku note s CL")
return(True)
elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
print("kolizija na sredini note s CL")
return(True)
elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
print("kolizija na kraju note s CL")
return(True)
elif ( nota.pozicija == ( cursorLeft + trajanje)):
print("kolizija na pocetku note s CR")
return(True)
elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
print("kolizija na sredini note sa CR")
return(True)
elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
print("kolizija na kraju note s CR")
return(True)
elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
print("kolizija note unutar Cursora")
return(True)
else:
return(False)
#sort a list of class instances by an attribute, e.g.:
#lista.sort(key=operator.attrgetter('broj'))
def findNote(nota, cursorLeft, trajanje):
if ( nota.pozicija == cursorLeft):
print("na pocetku note s CL")
return(1)
elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
print("na sredini note s CL")
return(2)
elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
print("na kraju note s CL")
return(3)
elif ( nota.pozicija == ( cursorLeft + trajanje)):
print("na pocetku note s CR")
return(4)
elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
print("na sredini note sa CR")
return(5)
elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
print("na kraju note s CR")
return(6)
elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
print("note unutar Cursora")
return(7)
else:
return(False)
letter2MidiNumberPrefix = {
"c" : "0",
"d" : "2",
"e" : "4",
"f" : "5",
"g" : "7",
"a" : "9",
"h" : "11",
}
letter2MidiOctave = {
",," : "24",
"," : "36",
"" : "48",
"'" : "60",
"''" : "72",
"'''" : "84",
}
predikat2Midi = {
0 : 0,
1 : 1,
2 : -1,
}
def nota2MidiNumber(nota):
return(int(letter2MidiNumberPrefix[kljucevi[nota.ton][0]]) + int(letter2MidiOctave[kljucevi[nota.ton][1]]) + int(predikat2Midi[nota.predikat]))
def get_git_revision_short_hash():
import subprocess
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])<|fim▁end|> | for i in [i for i,x in enumerate(spriteSlova) if x == slovo]:
return(i) |
<|file_name|>IActivationFunctor.java<|end_file_name|><|fim▁begin|>package org.amse.marinaSokol.model.interfaces.object.net;
public interface IActivationFunctor {
/**
	 * Activation function
	 * @param x - the number passed to the activation function
<|fim▁hole|>
/**
* ïðîèçâîäíàÿ ôóíêöèè àêòèâàöèè
* @param x - ÷èñëî, ïîäàííîå íà ôóíêöèþ
* @return âûõîä
* */
double getDerivation(double x);
/**
* Èìÿ ôóíêöèè àêòèâàöèè
* @return èìÿ ôóíêöèè àêòèâàöèè
* */
String getNameFunction();
}<|fim▁end|> | * @return âûõîä íåéðîíà
* */
double getFunction(double x);
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from vitrage_dashboard.entities import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),<|fim▁hole|><|fim▁end|> | ] |
<|file_name|>event_loop.rs<|end_file_name|><|fim▁begin|>use std::sync::{Arc, Condvar, Mutex};
use std::thread::spawn;
use std::time::{Duration, Instant};
use super::schedule_queue::*;
use super::scheduler::*;
#[derive(Clone)]
pub struct EventLoop {
queue: Arc<(Mutex<ScheduleQueue<Box<Action + Send>>>, Condvar)>,
}
impl EventLoop {
/// Creates a new EventLoop
pub fn new() -> Self {
let queue = Arc::new((Mutex::new(ScheduleQueue::new()), Condvar::new()));
let scheduler = EventLoop { queue: queue.clone() };
spawn(move || {
loop {
let mut action = dequeue(&queue);
action.invoke();
}
});
scheduler
}
}
fn dequeue(queue: &Arc<(Mutex<ScheduleQueue<Box<Action + Send>>>, Condvar)>) -> Box<Action> {
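    // Blocks until the earliest scheduled action becomes due, sleeping on the condvar
    // while the queue is empty or while the head of the queue is not yet due.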
let (ref mutex, ref cvar) = **queue;
let mut queue = mutex.lock().unwrap();
loop {
if let Some(record) = queue.dequeue() {
let now = Instant::now();
if record.1 <= now {
return record.0;
} else {
                let timeout = record.1 - now;
let r = cvar.wait_timeout(queue, timeout).unwrap();
queue = r.0;
if r.1.timed_out() {<|fim▁hole|> return record.0;
} else {
queue.enqueue(record);
continue;
}
}
} else {
queue = cvar.wait(queue).unwrap();
}
}
}
impl ParallelScheduler for EventLoop {
fn schedule<F>(&self, func: F, delay: Duration)
where F: FnOnce() + Send + 'static
{
let due = Instant::now() + delay;
let &(ref mutex, ref cvar) = &*self.queue;
mutex.lock().unwrap().enqueue((Box::new(Some(func)), due));
cvar.notify_one();
}
}
trait Action {
fn invoke(&mut self);
}
impl<F> Action for Option<F>
where F: FnOnce() + Send
{
fn invoke(&mut self) {
if let Some(action) = self.take() {
action();
}
}
}<|fim▁end|> | |
<|file_name|>Item.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.retrofit2.app;
import auto.parcel.AutoParcel;
import android.os.Parcelable;
import android.support.annotation.Nullable;
@AutoParcel
public abstract class Item implements Parcelable {
@Nullable
public abstract String icon();
@Nullable
public abstract String text1();
@AutoParcel.Builder
public abstract static class Builder {
public abstract Builder icon(String s);<|fim▁hole|> public static Builder builder() {
return new AutoParcel_Item.Builder();
}
public abstract Builder toBuilder();
}<|fim▁end|> | public abstract Builder text1(String s);
public abstract Item build();
}
|
<|file_name|>mdi.py<|end_file_name|><|fim▁begin|># Touchy is Copyright (c) 2009 Chris Radek <[email protected]>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M61' : [_('Set tool number'), 'Q'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],<|fim▁hole|> 'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
self.ocodes.append(call)
self.codes[call] = args
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
if not self.codes.has_key(gcode):
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
self.polar = p
def issue(self):
m = self.gcode
if m.lower().startswith('o'):
codes = self.codes[m]
for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
for i in range(self.numlabels):
self.not_editing(i)
self.editing(self.selected)
self.set_text("G")
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
if n > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
self.fill_out();
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
self.fill_out();
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
self.set_text(t + ".")
def minus(self, b):
t = self.get_text()
if self.selected > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
if tail.find("-") == -1:
self.set_text(head + "-" + tail)
else:
self.set_text(head + tail[1:])
def keypad(self, b):
t = self.get_text()
num = b.get_name()
self.set_text(t + num)
def gp(self, b):
self.g(b, "G", 1)
def g(self, b, code="G", polar=0):
self.mdi.set_polar(polar)
self.set_text(code, 0)
for i in range(1, self.numlabels):
self.set_text("", i)
self.editing(0)
self.mdi.clear()
def m(self, b):
self.g(b, "M")
def t(self, b):
self.g(b, "T")
def o(self, b):
old_code = self.labels[0].get_text()
ocodes = self.mdi.ocodes
if old_code in ocodes:
j = (ocodes.index(old_code) + 1) % len(ocodes)
else:
j = 0
self.g(b, ocodes[j])
self.next(b)
def select(self, eventbox, event):
n = int(eventbox.get_name()[12:])
if self.selected == 0:
self.fill_out()
if n <= self.numwords:
self.editing(n)
def set_tool(self, tool, g10l11):
self.g(0)
self.set_text("G10", 0)
self.next(0)
if g10l11:
self.set_text("L11", 1)
else:
self.set_text("L10", 1)
self.next(0)
self.set_text("P%d" % tool, 2)
self.next(0)
self.next(0)
self.next(0)
def set_origin(self, system):
self.g(0)
self.set_text("G10", 0)
self.next(0)
self.set_text("L20", 1)
self.next(0)
self.set_text("P%d" % system, 2)
self.next(0)<|fim▁end|> | 'G64' : [_('Continuous mode'), 'P'], |
<|file_name|>convert_computation.cc<|end_file_name|><|fim▁begin|>/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Usage: convert_computation <txt2bin|bin2txt> serialized_computation_proto
//
// bin2txt spits out the result to stdout. txt2bin modifies the file in place.
#include <stdio.h>
#include <unistd.h>
#include <string>
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
namespace tools {
void RealMain(const string& mode, const string& path) {
HloSnapshot module;
tensorflow::Env* env = tensorflow::Env::Default();
if (mode == "txt2bin") {
TF_CHECK_OK(tensorflow::ReadTextProto(env, path, &module));
TF_CHECK_OK(tensorflow::WriteBinaryProto(env, path, module));
} else if (mode == "bin2txt") {
TF_CHECK_OK(tensorflow::ReadBinaryProto(env, path, &module));
string out;
tensorflow::protobuf::TextFormat::PrintToString(module, &out);
fprintf(stdout, "%s", out.c_str());
} else {
LOG(QFATAL) << "unknown mode for computation conversion: " << mode;
}
}
} // namespace tools
} // namespace xla
int main(int argc, char** argv) {
tensorflow::port::InitMain(argv[0], &argc, &argv);
QCHECK_EQ(argc, 3) << "usage: " << argv[0] << " <txt2bin|bin2txt> <path>";
xla::tools::RealMain(argv[1], argv[2]);
return 0;
}<|fim▁end|> | |
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use std::fmt;
#[derive(Debug, PartialEq, Clone)]
pub struct LexerError {
line: usize,
col: usize,
}
impl LexerError {
pub fn new(line: usize, col: usize) -> LexerError {
LexerError {
line: line,
col: col,
}
}
}
impl fmt::Display for LexerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Invalid syntax at {}:{}", self.line, self.col)
}
}
#[cfg(test)]
mod tests {
use super::LexerError;
#[test]
fn test_descriptions_for_error() {
let err = LexerError::new(1, 10);<|fim▁hole|>}<|fim▁end|> | assert_eq!("Invalid syntax at 1:10", format!("{}", err));
} |
<|file_name|>test_rbac_resolvers.py<|end_file_name|><|fim▁begin|># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2common.services import rbac as rbac_services
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.rbac.types import SystemRole
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.pack import Pack
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.pack import PackDB
from st2common.rbac.resolvers import get_resolver_for_resource_type
from st2common.rbac.migrations import insert_system_roles
from st2tests.base import CleanDbTestCase
__all__ = [
'BasePermissionsResolverTestCase',
'PermissionsResolverUtilsTestCase'
]
class BasePermissionsResolverTestCase(CleanDbTestCase):
def setUp(self):
super(BasePermissionsResolverTestCase, self).setUp()
# Make sure RBAC is enabled
cfg.CONF.set_override(name='enable', override=True, group='rbac')
self.users = {}
self.roles = {}
self.resources = {}
# Run role "migrations"
insert_system_roles()
# Insert common mock objects
self._insert_common_mocks()
def _user_has_resource_db_permissions(self, resolver, user_db, resource_db, permission_types):
"""
Method which verifies that user has all the provided permissions.
"""
self.assertTrue(isinstance(permission_types, (list, tuple)))
self.assertTrue(len(permission_types) > 1)
for permission_type in permission_types:
result = resolver.user_has_resource_db_permission(
user_db=user_db,
resource_db=resource_db,
permission_type=permission_type)
if not result:
return False
return True<|fim▁hole|> self._insert_common_mock_resources()
self._insert_common_mock_roles()
self._insert_common_mock_role_assignments()
def _insert_common_mock_users(self):
# Insert common mock users
user_1_db = UserDB(name='admin')
user_1_db = User.add_or_update(user_1_db)
self.users['admin'] = user_1_db
user_2_db = UserDB(name='observer')
user_2_db = User.add_or_update(user_2_db)
self.users['observer'] = user_2_db
user_3_db = UserDB(name='no_roles')
user_3_db = User.add_or_update(user_3_db)
self.users['no_roles'] = user_3_db
user_4_db = UserDB(name='1_custom_role_no_permissions')
user_4_db = User.add_or_update(user_4_db)
self.users['1_custom_role_no_permissions'] = user_4_db
user_5_db = UserDB(name='1_role_pack_grant')
user_5_db = User.add_or_update(user_5_db)
self.users['custom_role_pack_grant'] = user_5_db
def _insert_common_mock_resources(self):
pack_1_db = PackDB(name='test_pack_1', ref='test_pack_1', description='',
version='0.1.0', author='foo', email='[email protected]')
pack_1_db = Pack.add_or_update(pack_1_db)
self.resources['pack_1'] = pack_1_db
pack_2_db = PackDB(name='test_pack_2', ref='test_pack_2', description='',
version='0.1.0', author='foo', email='[email protected]')
pack_2_db = Pack.add_or_update(pack_2_db)
self.resources['pack_2'] = pack_2_db
def _insert_common_mock_roles(self):
# Insert common mock roles
admin_role_db = rbac_services.get_role_by_name(name=SystemRole.ADMIN)
observer_role_db = rbac_services.get_role_by_name(name=SystemRole.OBSERVER)
self.roles['admin_role'] = admin_role_db
self.roles['observer_role'] = observer_role_db
# Custom role 1 - no grants
role_1_db = rbac_services.create_role(name='custom_role_1')
self.roles['custom_role_1'] = role_1_db
# Custom role 2 - one grant on pack_1
# "pack_create" on pack_1
grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(),
resource_type=ResourceType.PACK,
permission_types=[PermissionType.PACK_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_3_db = RoleDB(name='custom_role_pack_grant', permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['custom_role_pack_grant'] = role_3_db
def _insert_common_mock_role_assignments(self):
# Insert common mock role assignments
role_assignment_admin = UserRoleAssignmentDB(user=self.users['admin'].name,
role=self.roles['admin_role'].name)
role_assignment_admin = UserRoleAssignment.add_or_update(role_assignment_admin)
role_assignment_observer = UserRoleAssignmentDB(user=self.users['observer'].name,
role=self.roles['observer_role'].name)
role_assignment_observer = UserRoleAssignment.add_or_update(role_assignment_observer)
user_db = self.users['1_custom_role_no_permissions']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_1'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['custom_role_pack_grant']
role_assignment_db = UserRoleAssignmentDB(user=user_db.name,
role=self.roles['custom_role_pack_grant'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
class PermissionsResolverUtilsTestCase(unittest2.TestCase):
def test_get_resolver_for_resource_type_valid_resource_type(self):
valid_resources_types = [ResourceType.PACK, ResourceType.SENSOR, ResourceType.ACTION,
ResourceType.RULE, ResourceType.EXECUTION,
ResourceType.KEY_VALUE_PAIR,
ResourceType.WEBHOOK]
for resource_type in valid_resources_types:
resolver_instance = get_resolver_for_resource_type(resource_type=resource_type)
resource_name = resource_type.split('_')[0].lower()
class_name = resolver_instance.__class__.__name__.lower()
self.assertTrue(resource_name in class_name)
def test_get_resolver_for_resource_type_unsupported_resource_type(self):
expected_msg = 'Unsupported resource: alias'
self.assertRaisesRegexp(ValueError, expected_msg, get_resolver_for_resource_type,
resource_type='alias')<|fim▁end|> |
def _insert_common_mocks(self):
self._insert_common_mock_users() |
<|file_name|>test_util.py<|end_file_name|><|fim▁begin|>import unittest
from tests.test_basic import BaseTestCase
from datetime import timedelta, datetime, tzinfo
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):<|fim▁hole|> return timedelta(0)
class UtilTestCase(BaseTestCase):
"""
Tests utils
"""
def test_parse_iso_8601_time_str(self):
"""
At times, Amazon hands us a timestamp with no microseconds.
"""
import datetime
from route53.util import parse_iso_8601_time_str
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 0, \
tzinfo=UTC()))
self.assertEqual(parse_iso_8601_time_str('2013-07-28T01:00:01.001Z'),
datetime.datetime(2013, 7, 28, 1, 0, 1, 1000, \
tzinfo=UTC()))<|fim▁end|> | return "UTC"
def dst(self, dt): |
<|file_name|>grp_java.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
java = [
{'name':'common',
'mainpackage':True,
'shortdesc':'Installs the latest version of Java',
'description':'',
'packages-trusty':['openjdk-7-jre-lib'],
'packages-xenial':[],
'packages-bionic':[],
'packages-focal':[],
'packages-groovy':[],
'side-by-side':['jre-headless', 'jre', 'jdk'],
},
{'name':'jre-headless',
'shortdesc':'Installs the latest version of the Java Runtime Environment',
'description':'', <|fim▁hole|> 'packages-xenial':['openjdk-8-jre-headless'],
'packages-bionic':['openjdk-8-jre-headless'],
'packages-focal':['openjdk-11-jre-headless'],
'packages-groovy':['openjdk-11-jre-headless'],
},
{'name':'jre',
'shortdesc':'Installs the latest version of the Java Runtime Environment',
'description':'',
'depends':['jre-headless'],
'packages-trusty':['openjdk-7-jre', 'openjdk-8-jre'],
'packages-xenial':['openjdk-8-jre'],
'packages-bionic':['openjdk-8-jre'],
'packages-focal':['openjdk-11-jre'],
'packages-groovy':['openjdk-11-jre'],
},
{'name':'jdk',
'shortdesc':'Installs the latest version of the Java Development Kit',
'description':'',
'depends':['jre'],
'packages-trusty':['openjdk-7-jdk', 'openjdk-8-jdk'],
'packages-xenial':['openjdk-8-jdk'],
'packages-bionic':['openjdk-8-jdk'],
'packages-focal':['openjdk-11-jdk'],
'packages-groovy':['openjdk-11-jdk'],
},
{'name':'jdk-headless',
'shortdesc':'Installs the latest version of the Java Development Kit',
'description':'',
'depends':['jre-headless'],
'packages-trusty':['openjdk-7-jdk-headless', 'openjdk-8-jdk-headless'],
'packages-xenial':['openjdk-8-jdk-headless'],
'packages-bionic':['openjdk-8-jdk-headless'],
'packages-focal':['openjdk-11-jdk-headless'],
'packages-groovy':['openjdk-11-jdk-headless'],
},
{'name':'none',
'shortdesc':'Uninstalls all versions of Java',
'description':'',
'packages':[],
'noconflicts':[]
},
]<|fim▁end|> | 'depends':['common'],
'packages-trusty':['openjdk-7-jre-headless', 'openjdk-8-jre-headless'], |
<|file_name|>datepicker.js<|end_file_name|><|fim▁begin|>/*! jQuery UI - v1.11.4 - 2015-12-06
* http://jqueryui.com
* Includes: core.js, datepicker.js
* Copyright jQuery Foundation and other contributors; Licensed MIT */
(function( factory ) {
if ( typeof define === "function" && define.amd ) {
// AMD. Register as an anonymous module.
define([ "jquery" ], factory );
} else {
// Browser globals
factory( jQuery );
}
}(function( $ ) {
/*!
* jQuery UI Core 1.11.4
* http://jqueryui.com
*
* Copyright jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*
* http://api.jqueryui.com/category/ui-core/
*/
// $.ui might exist from components with no dependencies, e.g., $.ui.position
$.ui = $.ui || {};
$.extend( $.ui, {
version: "1.11.4",
keyCode: {
BACKSPACE: 8,
COMMA: 188,
DELETE: 46,
DOWN: 40,
END: 35,
ENTER: 13,
ESCAPE: 27,
HOME: 36,
LEFT: 37,
PAGE_DOWN: 34,
PAGE_UP: 33,
PERIOD: 190,
RIGHT: 39,
SPACE: 32,
TAB: 9,
UP: 38
}
});
// plugins
$.fn.extend({
scrollParent: function( includeHidden ) {
var position = this.css( "position" ),
excludeStaticParent = position === "absolute",
overflowRegex = includeHidden ? /(auto|scroll|hidden)/ : /(auto|scroll)/,
scrollParent = this.parents().filter( function() {
var parent = $( this );
if ( excludeStaticParent && parent.css( "position" ) === "static" ) {
return false;
}
return overflowRegex.test( parent.css( "overflow" ) + parent.css( "overflow-y" ) + parent.css( "overflow-x" ) );
}).eq( 0 );
return position === "fixed" || !scrollParent.length ? $( this[ 0 ].ownerDocument || document ) : scrollParent;
},
uniqueId: (function() {
var uuid = 0;
return function() {
return this.each(function() {
if ( !this.id ) {
this.id = "ui-id-" + ( ++uuid );
}
});
};
})(),
removeUniqueId: function() {
return this.each(function() {
if ( /^ui-id-\d+$/.test( this.id ) ) {
$( this ).removeAttr( "id" );
}
});
}
});
// selectors
function focusable( element, isTabIndexNotNaN ) {
var map, mapName, img,
nodeName = element.nodeName.toLowerCase();
if ( "area" === nodeName ) {
map = element.parentNode;
mapName = map.name;
if ( !element.href || !mapName || map.nodeName.toLowerCase() !== "map" ) {
return false;
}
img = $( "img[usemap='#" + mapName + "']" )[ 0 ];
return !!img && visible( img );
}
return ( /^(input|select|textarea|button|object)$/.test( nodeName ) ?
!element.disabled :
"a" === nodeName ?
element.href || isTabIndexNotNaN :
isTabIndexNotNaN) &&
// the element and all of its ancestors must be visible
visible( element );
}
function visible( element ) {
return $.expr.filters.visible( element ) &&
!$( element ).parents().addBack().filter(function() {
return $.css( this, "visibility" ) === "hidden";
}).length;
}
$.extend( $.expr[ ":" ], {
data: $.expr.createPseudo ?
$.expr.createPseudo(function( dataName ) {
return function( elem ) {
return !!$.data( elem, dataName );
};
}) :
// support: jQuery <1.8
function( elem, i, match ) {
return !!$.data( elem, match[ 3 ] );
},
focusable: function( element ) {
return focusable( element, !isNaN( $.attr( element, "tabindex" ) ) );
},
tabbable: function( element ) {
var tabIndex = $.attr( element, "tabindex" ),
isTabIndexNaN = isNaN( tabIndex );
return ( isTabIndexNaN || tabIndex >= 0 ) && focusable( element, !isTabIndexNaN );
}
});
// support: jQuery <1.8
if ( !$( "<a>" ).outerWidth( 1 ).jquery ) {
$.each( [ "Width", "Height" ], function( i, name ) {
var side = name === "Width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ],
type = name.toLowerCase(),
orig = {
innerWidth: $.fn.innerWidth,
innerHeight: $.fn.innerHeight,
outerWidth: $.fn.outerWidth,
outerHeight: $.fn.outerHeight
};
function reduce( elem, size, border, margin ) {
$.each( side, function() {
size -= parseFloat( $.css( elem, "padding" + this ) ) || 0;
if ( border ) {
size -= parseFloat( $.css( elem, "border" + this + "Width" ) ) || 0;
}
if ( margin ) {
size -= parseFloat( $.css( elem, "margin" + this ) ) || 0;
}
});
return size;
}
$.fn[ "inner" + name ] = function( size ) {
if ( size === undefined ) {
return orig[ "inner" + name ].call( this );
}
return this.each(function() {
$( this ).css( type, reduce( this, size ) + "px" );
});
};
$.fn[ "outer" + name] = function( size, margin ) {
if ( typeof size !== "number" ) {
return orig[ "outer" + name ].call( this, size );
}
return this.each(function() {
$( this).css( type, reduce( this, size, true, margin ) + "px" );
});
};
});
}
// support: jQuery <1.8
if ( !$.fn.addBack ) {
$.fn.addBack = function( selector ) {
return this.add( selector == null ?
this.prevObject : this.prevObject.filter( selector )
);
};
}
// support: jQuery 1.6.1, 1.6.2 (http://bugs.jquery.com/ticket/9413)
if ( $( "<a>" ).data( "a-b", "a" ).removeData( "a-b" ).data( "a-b" ) ) {
$.fn.removeData = (function( removeData ) {
return function( key ) {
if ( arguments.length ) {
return removeData.call( this, $.camelCase( key ) );
} else {
return removeData.call( this );
}
};
})( $.fn.removeData );
}
// deprecated
$.ui.ie = !!/msie [\w.]+/.exec( navigator.userAgent.toLowerCase() );
$.fn.extend({
focus: (function( orig ) {
return function( delay, fn ) {
return typeof delay === "number" ?
this.each(function() {
var elem = this;
setTimeout(function() {
$( elem ).focus();
if ( fn ) {
fn.call( elem );
}
}, delay );
}) :
orig.apply( this, arguments );
};
})( $.fn.focus ),
disableSelection: (function() {
var eventType = "onselectstart" in document.createElement( "div" ) ?
"selectstart" :
"mousedown";
return function() {
return this.bind( eventType + ".ui-disableSelection", function( event ) {
event.preventDefault();
});
};
})(),
enableSelection: function() {
return this.unbind( ".ui-disableSelection" );
},
zIndex: function( zIndex ) {
if ( zIndex !== undefined ) {
return this.css( "zIndex", zIndex );
}
if ( this.length ) {
var elem = $( this[ 0 ] ), position, value;
while ( elem.length && elem[ 0 ] !== document ) {
// Ignore z-index if position is set to a value where z-index is ignored by the browser
// This makes behavior of this function consistent across browsers
// WebKit always returns auto if the element is positioned
position = elem.css( "position" );
if ( position === "absolute" || position === "relative" || position === "fixed" ) {
// IE returns 0 when zIndex is not specified
// other browsers return a string
// we ignore the case of nested elements with an explicit value of 0
// <div style="z-index: -10;"><div style="z-index: 0;"></div></div>
value = parseInt( elem.css( "zIndex" ), 10 );
if ( !isNaN( value ) && value !== 0 ) {
return value;
}
}
elem = elem.parent();
}
}
return 0;
}
});
// $.ui.plugin is deprecated. Use $.widget() extensions instead.
$.ui.plugin = {
add: function( module, option, set ) {
var i,
proto = $.ui[ module ].prototype;
for ( i in set ) {
proto.plugins[ i ] = proto.plugins[ i ] || [];
proto.plugins[ i ].push( [ option, set[ i ] ] );
}
},
call: function( instance, name, args, allowDisconnected ) {
var i,
set = instance.plugins[ name ];
if ( !set ) {
return;
}
if ( !allowDisconnected && ( !instance.element[ 0 ].parentNode || instance.element[ 0 ].parentNode.nodeType === 11 ) ) {
return;
}
for ( i = 0; i < set.length; i++ ) {
if ( instance.options[ set[ i ][ 0 ] ] ) {
set[ i ][ 1 ].apply( instance.element, args );
}
}
}
};
/*!
* jQuery UI Datepicker 1.11.4
* http://jqueryui.com
*
* Copyright jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*
* http://api.jqueryui.com/datepicker/
*/
$.extend($.ui, { datepicker: { version: "1.11.4" } });
var datepicker_instActive;
function datepicker_getZindex( elem ) {
var position, value;
while ( elem.length && elem[ 0 ] !== document ) {
// Ignore z-index if position is set to a value where z-index is ignored by the browser
// This makes behavior of this function consistent across browsers
// WebKit always returns auto if the element is positioned
position = elem.css( "position" );
if ( position === "absolute" || position === "relative" || position === "fixed" ) {
// IE returns 0 when zIndex is not specified
// other browsers return a string
// we ignore the case of nested elements with an explicit value of 0
// <div style="z-index: -10;"><div style="z-index: 0;"></div></div>
value = parseInt( elem.css( "zIndex" ), 10 );
if ( !isNaN( value ) && value !== 0 ) {
return value;
}
}
elem = elem.parent();
}
return 0;
}
/* Date picker manager.
Use the singleton instance of this class, $.datepicker, to interact with the date picker.
Settings for (groups of) date pickers are maintained in an instance object,
allowing multiple different settings on the same page. */
function Datepicker() {
this._curInst = null; // The current instance in use
this._keyEvent = false; // If the last event was a key event
this._disabledInputs = []; // List of date picker inputs that have been disabled
this._datepickerShowing = false; // True if the popup picker is showing , false if not
this._inDialog = false; // True if showing within a "dialog", false if not
this._mainDivId = "ui-datepicker-div"; // The ID of the main datepicker division
this._inlineClass = "ui-datepicker-inline"; // The name of the inline marker class
this._appendClass = "ui-datepicker-append"; // The name of the append marker class
this._triggerClass = "ui-datepicker-trigger"; // The name of the trigger marker class
this._dialogClass = "ui-datepicker-dialog"; // The name of the dialog marker class
this._disableClass = "ui-datepicker-disabled"; // The name of the disabled covering marker class
this._unselectableClass = "ui-datepicker-unselectable"; // The name of the unselectable cell marker class
this._currentClass = "ui-datepicker-current-day"; // The name of the current day marker class
this._dayOverClass = "ui-datepicker-days-cell-over"; // The name of the day hover marker class
this.regional = []; // Available regional settings, indexed by language code
this.regional[""] = { // Default regional settings
closeText: "Done", // Display text for close link
prevText: "Prev", // Display text for previous month link
nextText: "Next", // Display text for next month link
currentText: "Today", // Display text for current month link
monthNames: ["January","February","March","April","May","June",
"July","August","September","October","November","December"], // Names of months for drop-down and formatting
monthNamesShort: ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"], // For formatting
dayNames: ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"], // For formatting
dayNamesShort: ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"], // For formatting
dayNamesMin: ["Su","Mo","Tu","We","Th","Fr","Sa"], // Column headings for days starting at Sunday
weekHeader: "Wk", // Column header for week of the year
dateFormat: "mm/dd/yy", // See format options on parseDate
firstDay: 0, // The first day of the week, Sun = 0, Mon = 1, ...
isRTL: false, // True if right-to-left language, false if left-to-right
showMonthAfterYear: false, // True if the year select precedes month, false for month then year
yearSuffix: "" // Additional text to append to the year in the month headers
};
this._defaults = { // Global defaults for all the date picker instances
showOn: "focus", // "focus" for popup on focus,
// "button" for trigger button, or "both" for either
showAnim: "fadeIn", // Name of jQuery animation for popup
showOptions: {}, // Options for enhanced animations
defaultDate: null, // Used when field is blank: actual date,
// +/-number for offset from today, null for today
appendText: "", // Display text following the input box, e.g. showing the format
buttonText: "...", // Text for trigger button
buttonImage: "", // URL for trigger button image
buttonImageOnly: false, // True if the image appears alone, false if it appears on a button
hideIfNoPrevNext: false, // True to hide next/previous month links
// if not applicable, false to just disable them
navigationAsDateFormat: false, // True if date formatting applied to prev/today/next links
gotoCurrent: false, // True if today link goes back to current selection instead
changeMonth: false, // True if month can be selected directly, false if only prev/next
changeYear: false, // True if year can be selected directly, false if only prev/next
yearRange: "c-10:c+10", // Range of years to display in drop-down,
// either relative to today's year (-nn:+nn), relative to currently displayed year
// (c-nn:c+nn), absolute (nnnn:nnnn), or a combination of the above (nnnn:-n)
showOtherMonths: false, // True to show dates in other months, false to leave blank
selectOtherMonths: false, // True to allow selection of dates in other months, false for unselectable
showWeek: false, // True to show week of the year, false to not show it
calculateWeek: this.iso8601Week, // How to calculate the week of the year,
// takes a Date and returns the number of the week for it
shortYearCutoff: "+10", // Short year values < this are in the current century,
// > this are in the previous century,
// string value starting with "+" for current year + value
minDate: null, // The earliest selectable date, or null for no limit
maxDate: null, // The latest selectable date, or null for no limit
duration: "fast", // Duration of display/closure
beforeShowDay: null, // Function that takes a date and returns an array with
// [0] = true if selectable, false if not, [1] = custom CSS class name(s) or "",
// [2] = cell title (optional), e.g. $.datepicker.noWeekends
beforeShow: null, // Function that takes an input field and
// returns a set of custom settings for the date picker<|fim▁hole|> showCurrentAtPos: 0, // The position in multipe months at which to show the current month (starting at 0)
stepMonths: 1, // Number of months to step back/forward
stepBigMonths: 12, // Number of months to step back/forward for the big links
altField: "", // Selector for an alternate field to store selected dates into
altFormat: "", // The date format to use for the alternate field
constrainInput: true, // The input is constrained by the current date format
showButtonPanel: false, // True to show button panel, false to not show it
autoSize: false, // True to size the input for the date format, false to leave as is
disabled: false // The initial disabled state
};
$.extend(this._defaults, this.regional[""]);
this.regional.en = $.extend( true, {}, this.regional[ "" ]);
this.regional[ "en-US" ] = $.extend( true, {}, this.regional.en );
this.dpDiv = datepicker_bindHover($("<div id='" + this._mainDivId + "' class='ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all'></div>"));
}
$.extend(Datepicker.prototype, {
/* Class name added to elements to indicate already configured with a date picker. */
markerClassName: "hasDatepicker",
//Keep track of the maximum number of rows displayed (see #7043)
maxRows: 4,
// TODO rename to "widget" when switching to widget factory
_widgetDatepicker: function() {
return this.dpDiv;
},
/* Override the default settings for all instances of the date picker.
* @param settings object - the new settings to use as defaults (anonymous object)
* @return the manager object
*/
setDefaults: function(settings) {
datepicker_extendRemove(this._defaults, settings || {});
return this;
},
/* Attach the date picker to a jQuery selection.
* @param target element - the target input field or division or span
* @param settings object - the new settings to use for this date picker instance (anonymous)
*/
_attachDatepicker: function(target, settings) {
var nodeName, inline, inst;
nodeName = target.nodeName.toLowerCase();
inline = (nodeName === "div" || nodeName === "span");
if (!target.id) {
this.uuid += 1;
target.id = "dp" + this.uuid;
}
inst = this._newInst($(target), inline);
inst.settings = $.extend({}, settings || {});
if (nodeName === "input") {
this._connectDatepicker(target, inst);
} else if (inline) {
this._inlineDatepicker(target, inst);
}
},
/* Create a new instance object. */
_newInst: function(target, inline) {
var id = target[0].id.replace(/([^A-Za-z0-9_\-])/g, "\\\\$1"); // escape jQuery meta chars
return {id: id, input: target, // associated target
selectedDay: 0, selectedMonth: 0, selectedYear: 0, // current selection
drawMonth: 0, drawYear: 0, // month being drawn
inline: inline, // is datepicker inline or not
dpDiv: (!inline ? this.dpDiv : // presentation div
datepicker_bindHover($("<div class='" + this._inlineClass + " ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all'></div>")))};
},
/* Attach the date picker to an input field. */
_connectDatepicker: function(target, inst) {
var input = $(target);
inst.append = $([]);
inst.trigger = $([]);
if (input.hasClass(this.markerClassName)) {
return;
}
this._attachments(input, inst);
input.addClass(this.markerClassName).keydown(this._doKeyDown).
keypress(this._doKeyPress).keyup(this._doKeyUp);
this._autoSize(inst);
$.data(target, "datepicker", inst);
//If disabled option is true, disable the datepicker once it has been attached to the input (see ticket #5665)
if( inst.settings.disabled ) {
this._disableDatepicker( target );
}
},
/* Make attachments based on settings. */
_attachments: function(input, inst) {
var showOn, buttonText, buttonImage,
appendText = this._get(inst, "appendText"),
isRTL = this._get(inst, "isRTL");
if (inst.append) {
inst.append.remove();
}
if (appendText) {
inst.append = $("<span class='" + this._appendClass + "'>" + appendText + "</span>");
input[isRTL ? "before" : "after"](inst.append);
}
input.unbind("focus", this._showDatepicker);
if (inst.trigger) {
inst.trigger.remove();
}
showOn = this._get(inst, "showOn");
if (showOn === "focus" || showOn === "both") { // pop-up date picker when in the marked field
input.focus(this._showDatepicker);
}
if (showOn === "button" || showOn === "both") { // pop-up date picker when button clicked
buttonText = this._get(inst, "buttonText");
buttonImage = this._get(inst, "buttonImage");
inst.trigger = $(this._get(inst, "buttonImageOnly") ?
$("<img/>").addClass(this._triggerClass).
attr({ src: buttonImage, alt: buttonText, title: buttonText }) :
$("<button type='button'></button>").addClass(this._triggerClass).
html(!buttonImage ? buttonText : $("<img/>").attr(
{ src:buttonImage, alt:buttonText, title:buttonText })));
input[isRTL ? "before" : "after"](inst.trigger);
inst.trigger.click(function() {
if ($.datepicker._datepickerShowing && $.datepicker._lastInput === input[0]) {
$.datepicker._hideDatepicker();
} else if ($.datepicker._datepickerShowing && $.datepicker._lastInput !== input[0]) {
$.datepicker._hideDatepicker();
$.datepicker._showDatepicker(input[0]);
} else {
$.datepicker._showDatepicker(input[0]);
}
return false;
});
}
},
/* Apply the maximum length for the date format. */
_autoSize: function(inst) {
if (this._get(inst, "autoSize") && !inst.inline) {
var findMax, max, maxI, i,
date = new Date(2009, 12 - 1, 20), // Ensure double digits
dateFormat = this._get(inst, "dateFormat");
if (dateFormat.match(/[DM]/)) {
findMax = function(names) {
max = 0;
maxI = 0;
for (i = 0; i < names.length; i++) {
if (names[i].length > max) {
max = names[i].length;
maxI = i;
}
}
return maxI;
};
date.setMonth(findMax(this._get(inst, (dateFormat.match(/MM/) ?
"monthNames" : "monthNamesShort"))));
date.setDate(findMax(this._get(inst, (dateFormat.match(/DD/) ?
"dayNames" : "dayNamesShort"))) + 20 - date.getDay());
}
inst.input.attr("size", this._formatDate(inst, date).length);
}
},
/* Attach an inline date picker to a div. */
_inlineDatepicker: function(target, inst) {
var divSpan = $(target);
if (divSpan.hasClass(this.markerClassName)) {
return;
}
divSpan.addClass(this.markerClassName).append(inst.dpDiv);
$.data(target, "datepicker", inst);
this._setDate(inst, this._getDefaultDate(inst), true);
this._updateDatepicker(inst);
this._updateAlternate(inst);
//If disabled option is true, disable the datepicker before showing it (see ticket #5665)
if( inst.settings.disabled ) {
this._disableDatepicker( target );
}
// Set display:block in place of inst.dpDiv.show() which won't work on disconnected elements
// http://bugs.jqueryui.com/ticket/7552 - A Datepicker created on a detached div has zero height
inst.dpDiv.css( "display", "block" );
},
/* Pop-up the date picker in a "dialog" box.
* @param input element - ignored
* @param date string or Date - the initial date to display
* @param onSelect function - the function to call when a date is selected
* @param settings object - update the dialog date picker instance's settings (anonymous object)
* @param pos int[2] - coordinates for the dialog's position within the screen or
* event - with x/y coordinates or
* leave empty for default (screen centre)
* @return the manager object
*/
_dialogDatepicker: function(input, date, onSelect, settings, pos) {
var id, browserWidth, browserHeight, scrollX, scrollY,
inst = this._dialogInst; // internal instance
if (!inst) {
this.uuid += 1;
id = "dp" + this.uuid;
this._dialogInput = $("<input type='text' id='" + id +
"' style='position: absolute; top: -100px; width: 0px;'/>");
this._dialogInput.keydown(this._doKeyDown);
$("body").append(this._dialogInput);
inst = this._dialogInst = this._newInst(this._dialogInput, false);
inst.settings = {};
$.data(this._dialogInput[0], "datepicker", inst);
}
datepicker_extendRemove(inst.settings, settings || {});
date = (date && date.constructor === Date ? this._formatDate(inst, date) : date);
this._dialogInput.val(date);
this._pos = (pos ? (pos.length ? pos : [pos.pageX, pos.pageY]) : null);
if (!this._pos) {
browserWidth = document.documentElement.clientWidth;
browserHeight = document.documentElement.clientHeight;
scrollX = document.documentElement.scrollLeft || document.body.scrollLeft;
scrollY = document.documentElement.scrollTop || document.body.scrollTop;
this._pos = // should use actual width/height below
[(browserWidth / 2) - 100 + scrollX, (browserHeight / 2) - 150 + scrollY];
}
// move input on screen for focus, but hidden behind dialog
this._dialogInput.css("left", (this._pos[0] + 20) + "px").css("top", this._pos[1] + "px");
inst.settings.onSelect = onSelect;
this._inDialog = true;
this.dpDiv.addClass(this._dialogClass);
this._showDatepicker(this._dialogInput[0]);
if ($.blockUI) {
$.blockUI(this.dpDiv);
}
$.data(this._dialogInput[0], "datepicker", inst);
return this;
},
/* Detach a datepicker from its control.
* @param target element - the target input field or division or span
*/
_destroyDatepicker: function(target) {
var nodeName,
$target = $(target),
inst = $.data(target, "datepicker");
if (!$target.hasClass(this.markerClassName)) {
return;
}
nodeName = target.nodeName.toLowerCase();
$.removeData(target, "datepicker");
if (nodeName === "input") {
inst.append.remove();
inst.trigger.remove();
$target.removeClass(this.markerClassName).
unbind("focus", this._showDatepicker).
unbind("keydown", this._doKeyDown).
unbind("keypress", this._doKeyPress).
unbind("keyup", this._doKeyUp);
} else if (nodeName === "div" || nodeName === "span") {
$target.removeClass(this.markerClassName).empty();
}
if ( datepicker_instActive === inst ) {
datepicker_instActive = null;
}
},
/* Enable the date picker to a jQuery selection.
* @param target element - the target input field or division or span
*/
_enableDatepicker: function(target) {
var nodeName, inline,
$target = $(target),
inst = $.data(target, "datepicker");
if (!$target.hasClass(this.markerClassName)) {
return;
}
nodeName = target.nodeName.toLowerCase();
if (nodeName === "input") {
target.disabled = false;
inst.trigger.filter("button").
each(function() { this.disabled = false; }).end().
filter("img").css({opacity: "1.0", cursor: ""});
} else if (nodeName === "div" || nodeName === "span") {
inline = $target.children("." + this._inlineClass);
inline.children().removeClass("ui-state-disabled");
inline.find("select.ui-datepicker-month, select.ui-datepicker-year").
prop("disabled", false);
}
this._disabledInputs = $.map(this._disabledInputs,
function(value) { return (value === target ? null : value); }); // delete entry
},
/* Disable the date picker to a jQuery selection.
* @param target element - the target input field or division or span
*/
_disableDatepicker: function(target) {
var nodeName, inline,
$target = $(target),
inst = $.data(target, "datepicker");
if (!$target.hasClass(this.markerClassName)) {
return;
}
nodeName = target.nodeName.toLowerCase();
if (nodeName === "input") {
target.disabled = true;
inst.trigger.filter("button").
each(function() { this.disabled = true; }).end().
filter("img").css({opacity: "0.5", cursor: "default"});
} else if (nodeName === "div" || nodeName === "span") {
inline = $target.children("." + this._inlineClass);
inline.children().addClass("ui-state-disabled");
inline.find("select.ui-datepicker-month, select.ui-datepicker-year").
prop("disabled", true);
}
this._disabledInputs = $.map(this._disabledInputs,
function(value) { return (value === target ? null : value); }); // delete entry
this._disabledInputs[this._disabledInputs.length] = target;
},
/* Is the first field in a jQuery collection disabled as a datepicker?
* @param target element - the target input field or division or span
* @return boolean - true if disabled, false if enabled
*/
_isDisabledDatepicker: function(target) {
if (!target) {
return false;
}
for (var i = 0; i < this._disabledInputs.length; i++) {
if (this._disabledInputs[i] === target) {
return true;
}
}
return false;
},
/* Retrieve the instance data for the target control.
* @param target element - the target input field or division or span
* @return object - the associated instance data
* @throws error if a jQuery problem getting data
*/
_getInst: function(target) {
try {
return $.data(target, "datepicker");
}
catch (err) {
throw "Missing instance data for this datepicker";
}
},
/* Update or retrieve the settings for a date picker attached to an input field or division.
* @param target element - the target input field or division or span
* @param name object - the new settings to update or
* string - the name of the setting to change or retrieve,
* when retrieving also "all" for all instance settings or
* "defaults" for all global defaults
* @param value any - the new value for the setting
* (omit if above is an object or to retrieve a value)
*/
_optionDatepicker: function(target, name, value) {
var settings, date, minDate, maxDate,
inst = this._getInst(target);
if (arguments.length === 2 && typeof name === "string") {
return (name === "defaults" ? $.extend({}, $.datepicker._defaults) :
(inst ? (name === "all" ? $.extend({}, inst.settings) :
this._get(inst, name)) : null));
}
settings = name || {};
if (typeof name === "string") {
settings = {};
settings[name] = value;
}
if (inst) {
if (this._curInst === inst) {
this._hideDatepicker();
}
date = this._getDateDatepicker(target, true);
minDate = this._getMinMaxDate(inst, "min");
maxDate = this._getMinMaxDate(inst, "max");
datepicker_extendRemove(inst.settings, settings);
// reformat the old minDate/maxDate values if dateFormat changes and a new minDate/maxDate isn't provided
if (minDate !== null && settings.dateFormat !== undefined && settings.minDate === undefined) {
inst.settings.minDate = this._formatDate(inst, minDate);
}
if (maxDate !== null && settings.dateFormat !== undefined && settings.maxDate === undefined) {
inst.settings.maxDate = this._formatDate(inst, maxDate);
}
if ( "disabled" in settings ) {
if ( settings.disabled ) {
this._disableDatepicker(target);
} else {
this._enableDatepicker(target);
}
}
this._attachments($(target), inst);
this._autoSize(inst);
this._setDate(inst, date);
this._updateAlternate(inst);
this._updateDatepicker(inst);
}
},
// change method deprecated
_changeDatepicker: function(target, name, value) {
this._optionDatepicker(target, name, value);
},
/* Redraw the date picker attached to an input field or division.
* @param target element - the target input field or division or span
*/
_refreshDatepicker: function(target) {
var inst = this._getInst(target);
if (inst) {
this._updateDatepicker(inst);
}
},
/* Set the dates for a jQuery selection.
* @param target element - the target input field or division or span
* @param date Date - the new date
*/
_setDateDatepicker: function(target, date) {
var inst = this._getInst(target);
if (inst) {
this._setDate(inst, date);
this._updateDatepicker(inst);
this._updateAlternate(inst);
}
},
/* Get the date(s) for the first entry in a jQuery selection.
* @param target element - the target input field or division or span
* @param noDefault boolean - true if no default date is to be used
* @return Date - the current date
*/
_getDateDatepicker: function(target, noDefault) {
var inst = this._getInst(target);
if (inst && !inst.inline) {
this._setDateFromField(inst, noDefault);
}
return (inst ? this._getDate(inst) : null);
},
/* Handle keystrokes. */
_doKeyDown: function(event) {
var onSelect, dateStr, sel,
inst = $.datepicker._getInst(event.target),
handled = true,
isRTL = inst.dpDiv.is(".ui-datepicker-rtl");
inst._keyEvent = true;
if ($.datepicker._datepickerShowing) {
switch (event.keyCode) {
case 9: $.datepicker._hideDatepicker();
handled = false;
break; // hide on tab out
case 13: sel = $("td." + $.datepicker._dayOverClass + ":not(." +
$.datepicker._currentClass + ")", inst.dpDiv);
if (sel[0]) {
$.datepicker._selectDay(event.target, inst.selectedMonth, inst.selectedYear, sel[0]);
}
onSelect = $.datepicker._get(inst, "onSelect");
if (onSelect) {
dateStr = $.datepicker._formatDate(inst);
// trigger custom callback
onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]);
} else {
$.datepicker._hideDatepicker();
}
return false; // don't submit the form
case 27: $.datepicker._hideDatepicker();
break; // hide on escape
case 33: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-$.datepicker._get(inst, "stepBigMonths") :
-$.datepicker._get(inst, "stepMonths")), "M");
break; // previous month/year on page up/+ ctrl
case 34: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
+$.datepicker._get(inst, "stepBigMonths") :
+$.datepicker._get(inst, "stepMonths")), "M");
break; // next month/year on page down/+ ctrl
case 35: if (event.ctrlKey || event.metaKey) {
$.datepicker._clearDate(event.target);
}
handled = event.ctrlKey || event.metaKey;
break; // clear on ctrl or command +end
case 36: if (event.ctrlKey || event.metaKey) {
$.datepicker._gotoToday(event.target);
}
handled = event.ctrlKey || event.metaKey;
break; // current on ctrl or command +home
case 37: if (event.ctrlKey || event.metaKey) {
$.datepicker._adjustDate(event.target, (isRTL ? +1 : -1), "D");
}
handled = event.ctrlKey || event.metaKey;
// -1 day on ctrl or command +left
if (event.originalEvent.altKey) {
$.datepicker._adjustDate(event.target, (event.ctrlKey ?
-$.datepicker._get(inst, "stepBigMonths") :
-$.datepicker._get(inst, "stepMonths")), "M");
}
// next month/year on alt +left on Mac
break;
case 38: if (event.ctrlKey || event.metaKey) {
$.datepicker._adjustDate(event.target, -7, "D");
}
handled = event.ctrlKey || event.metaKey;
break; // -1 week on ctrl or command +up
case 39: if (event.ctrlKey || event.metaKey) {
$.datepicker._adjustDate(event.target, (isRTL ? -1 : +1), "D");
}
handled = event.ctrlKey || event.metaKey;
// +1 day on ctrl or command +right
if (event.originalEvent.altKey) {
$.datepicker._adjustDate(event.target, (event.ctrlKey ?
+$.datepicker._get(inst, "stepBigMonths") :
+$.datepicker._get(inst, "stepMonths")), "M");
}
// next month/year on alt +right
break;
case 40: if (event.ctrlKey || event.metaKey) {
$.datepicker._adjustDate(event.target, +7, "D");
}
handled = event.ctrlKey || event.metaKey;
break; // +1 week on ctrl or command +down
default: handled = false;
}
} else if (event.keyCode === 36 && event.ctrlKey) { // display the date picker on ctrl+home
$.datepicker._showDatepicker(this);
} else {
handled = false;
}
if (handled) {
event.preventDefault();
event.stopPropagation();
}
},
/* Filter entered characters - based on date format. */
_doKeyPress: function(event) {
var chars, chr,
inst = $.datepicker._getInst(event.target);
if ($.datepicker._get(inst, "constrainInput")) {
chars = $.datepicker._possibleChars($.datepicker._get(inst, "dateFormat"));
chr = String.fromCharCode(event.charCode == null ? event.keyCode : event.charCode);
return event.ctrlKey || event.metaKey || (chr < " " || !chars || chars.indexOf(chr) > -1);
}
},
/* Synchronise manual entry and field/alternate field. */
_doKeyUp: function(event) {
var date,
inst = $.datepicker._getInst(event.target);
if (inst.input.val() !== inst.lastVal) {
try {
date = $.datepicker.parseDate($.datepicker._get(inst, "dateFormat"),
(inst.input ? inst.input.val() : null),
$.datepicker._getFormatConfig(inst));
if (date) { // only if valid
$.datepicker._setDateFromField(inst);
$.datepicker._updateAlternate(inst);
$.datepicker._updateDatepicker(inst);
}
}
catch (err) {
}
}
return true;
},
/* Pop-up the date picker for a given input field.
* If false returned from beforeShow event handler do not show.
* @param input element - the input field attached to the date picker or
* event - if triggered by focus
*/
_showDatepicker: function(input) {
input = input.target || input;
if (input.nodeName.toLowerCase() !== "input") { // find from button/image trigger
input = $("input", input.parentNode)[0];
}
if ($.datepicker._isDisabledDatepicker(input) || $.datepicker._lastInput === input) { // already here
return;
}
var inst, beforeShow, beforeShowSettings, isFixed,
offset, showAnim, duration;
inst = $.datepicker._getInst(input);
if ($.datepicker._curInst && $.datepicker._curInst !== inst) {
$.datepicker._curInst.dpDiv.stop(true, true);
if ( inst && $.datepicker._datepickerShowing ) {
$.datepicker._hideDatepicker( $.datepicker._curInst.input[0] );
}
}
beforeShow = $.datepicker._get(inst, "beforeShow");
beforeShowSettings = beforeShow ? beforeShow.apply(input, [input, inst]) : {};
if(beforeShowSettings === false){
return;
}
datepicker_extendRemove(inst.settings, beforeShowSettings);
inst.lastVal = null;
$.datepicker._lastInput = input;
$.datepicker._setDateFromField(inst);
if ($.datepicker._inDialog) { // hide cursor
input.value = "";
}
if (!$.datepicker._pos) { // position below input
$.datepicker._pos = $.datepicker._findPos(input);
$.datepicker._pos[1] += input.offsetHeight; // add the height
}
isFixed = false;
$(input).parents().each(function() {
isFixed |= $(this).css("position") === "fixed";
return !isFixed;
});
offset = {left: $.datepicker._pos[0], top: $.datepicker._pos[1]};
$.datepicker._pos = null;
//to avoid flashes on Firefox
inst.dpDiv.empty();
// determine sizing offscreen
inst.dpDiv.css({position: "absolute", display: "block", top: "-1000px"});
$.datepicker._updateDatepicker(inst);
// fix width for dynamic number of date pickers
// and adjust position before showing
offset = $.datepicker._checkOffset(inst, offset, isFixed);
inst.dpDiv.css({position: ($.datepicker._inDialog && $.blockUI ?
"static" : (isFixed ? "fixed" : "absolute")), display: "none",
left: offset.left + "px", top: offset.top + "px"});
if (!inst.inline) {
showAnim = $.datepicker._get(inst, "showAnim");
duration = $.datepicker._get(inst, "duration");
inst.dpDiv.css( "z-index", datepicker_getZindex( $( input ) ) + 1 );
$.datepicker._datepickerShowing = true;
if ( $.effects && $.effects.effect[ showAnim ] ) {
inst.dpDiv.show(showAnim, $.datepicker._get(inst, "showOptions"), duration);
} else {
inst.dpDiv[showAnim || "show"](showAnim ? duration : null);
}
if ( $.datepicker._shouldFocusInput( inst ) ) {
inst.input.focus();
}
$.datepicker._curInst = inst;
}
},
/* Generate the date picker content. */
_updateDatepicker: function(inst) {
this.maxRows = 4; //Reset the max number of rows being displayed (see #7043)
datepicker_instActive = inst; // for delegate hover events
inst.dpDiv.empty().append(this._generateHTML(inst));
this._attachHandlers(inst);
var origyearshtml,
numMonths = this._getNumberOfMonths(inst),
cols = numMonths[1],
width = 17,
activeCell = inst.dpDiv.find( "." + this._dayOverClass + " a" );
if ( activeCell.length > 0 ) {
datepicker_handleMouseover.apply( activeCell.get( 0 ) );
}
inst.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width("");
if (cols > 1) {
inst.dpDiv.addClass("ui-datepicker-multi-" + cols).css("width", (width * cols) + "em");
}
inst.dpDiv[(numMonths[0] !== 1 || numMonths[1] !== 1 ? "add" : "remove") +
"Class"]("ui-datepicker-multi");
inst.dpDiv[(this._get(inst, "isRTL") ? "add" : "remove") +
"Class"]("ui-datepicker-rtl");
if (inst === $.datepicker._curInst && $.datepicker._datepickerShowing && $.datepicker._shouldFocusInput( inst ) ) {
inst.input.focus();
}
// deferred render of the years select (to avoid flashes on Firefox)
if( inst.yearshtml ){
origyearshtml = inst.yearshtml;
setTimeout(function(){
// ensure that inst.yearshtml didn't change.
if( origyearshtml === inst.yearshtml && inst.yearshtml ){
inst.dpDiv.find("select.ui-datepicker-year:first").replaceWith(inst.yearshtml);
}
origyearshtml = inst.yearshtml = null;
}, 0);
}
},
// #6694 - don't focus the input if it's already focused
// this breaks the change event in IE
// Support: IE and jQuery <1.9
_shouldFocusInput: function( inst ) {
return inst.input && inst.input.is( ":visible" ) && !inst.input.is( ":disabled" ) && !inst.input.is( ":focus" );
},
/* Check positioning to remain on screen. */
_checkOffset: function(inst, offset, isFixed) {
var dpWidth = inst.dpDiv.outerWidth(),
dpHeight = inst.dpDiv.outerHeight(),
inputWidth = inst.input ? inst.input.outerWidth() : 0,
inputHeight = inst.input ? inst.input.outerHeight() : 0,
viewWidth = document.documentElement.clientWidth + (isFixed ? 0 : $(document).scrollLeft()),
viewHeight = document.documentElement.clientHeight + (isFixed ? 0 : $(document).scrollTop());
offset.left -= (this._get(inst, "isRTL") ? (dpWidth - inputWidth) : 0);
offset.left -= (isFixed && offset.left === inst.input.offset().left) ? $(document).scrollLeft() : 0;
offset.top -= (isFixed && offset.top === (inst.input.offset().top + inputHeight)) ? $(document).scrollTop() : 0;
// now check if datepicker is showing outside window viewport - move to a better place if so.
offset.left -= Math.min(offset.left, (offset.left + dpWidth > viewWidth && viewWidth > dpWidth) ?
Math.abs(offset.left + dpWidth - viewWidth) : 0);
offset.top -= Math.min(offset.top, (offset.top + dpHeight > viewHeight && viewHeight > dpHeight) ?
Math.abs(dpHeight + inputHeight) : 0);
return offset;
},
/* Find an object's position on the screen. */
_findPos: function(obj) {
var position,
inst = this._getInst(obj),
isRTL = this._get(inst, "isRTL");
while (obj && (obj.type === "hidden" || obj.nodeType !== 1 || $.expr.filters.hidden(obj))) {
obj = obj[isRTL ? "previousSibling" : "nextSibling"];
}
position = $(obj).offset();
return [position.left, position.top];
},
/* Hide the date picker from view.
* @param input element - the input field attached to the date picker
*/
_hideDatepicker: function(input) {
var showAnim, duration, postProcess, onClose,
inst = this._curInst;
if (!inst || (input && inst !== $.data(input, "datepicker"))) {
return;
}
if (this._datepickerShowing) {
showAnim = this._get(inst, "showAnim");
duration = this._get(inst, "duration");
postProcess = function() {
$.datepicker._tidyDialog(inst);
};
// DEPRECATED: after BC for 1.8.x $.effects[ showAnim ] is not needed
if ( $.effects && ( $.effects.effect[ showAnim ] || $.effects[ showAnim ] ) ) {
inst.dpDiv.hide(showAnim, $.datepicker._get(inst, "showOptions"), duration, postProcess);
} else {
inst.dpDiv[(showAnim === "slideDown" ? "slideUp" :
(showAnim === "fadeIn" ? "fadeOut" : "hide"))]((showAnim ? duration : null), postProcess);
}
if (!showAnim) {
postProcess();
}
this._datepickerShowing = false;
onClose = this._get(inst, "onClose");
if (onClose) {
onClose.apply((inst.input ? inst.input[0] : null), [(inst.input ? inst.input.val() : ""), inst]);
}
this._lastInput = null;
if (this._inDialog) {
this._dialogInput.css({ position: "absolute", left: "0", top: "-100px" });
if ($.blockUI) {
$.unblockUI();
$("body").append(this.dpDiv);
}
}
this._inDialog = false;
}
},
/* Tidy up after a dialog display. */
_tidyDialog: function(inst) {
inst.dpDiv.removeClass(this._dialogClass).unbind(".ui-datepicker-calendar");
},
/* Close date picker if clicked elsewhere. */
_checkExternalClick: function(event) {
if (!$.datepicker._curInst) {
return;
}
var $target = $(event.target),
inst = $.datepicker._getInst($target[0]);
if ( ( ( $target[0].id !== $.datepicker._mainDivId &&
$target.parents("#" + $.datepicker._mainDivId).length === 0 &&
!$target.hasClass($.datepicker.markerClassName) &&
!$target.closest("." + $.datepicker._triggerClass).length &&
$.datepicker._datepickerShowing && !($.datepicker._inDialog && $.blockUI) ) ) ||
( $target.hasClass($.datepicker.markerClassName) && $.datepicker._curInst !== inst ) ) {
$.datepicker._hideDatepicker();
}
},
/* Adjust one of the date sub-fields. */
_adjustDate: function(id, offset, period) {
var target = $(id),
inst = this._getInst(target[0]);
if (this._isDisabledDatepicker(target[0])) {
return;
}
this._adjustInstDate(inst, offset +
(period === "M" ? this._get(inst, "showCurrentAtPos") : 0), // undo positioning
period);
this._updateDatepicker(inst);
},
/* Action for current link. */
_gotoToday: function(id) {
var date,
target = $(id),
inst = this._getInst(target[0]);
if (this._get(inst, "gotoCurrent") && inst.currentDay) {
inst.selectedDay = inst.currentDay;
inst.drawMonth = inst.selectedMonth = inst.currentMonth;
inst.drawYear = inst.selectedYear = inst.currentYear;
} else {
date = new Date();
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
}
this._notifyChange(inst);
this._adjustDate(target);
},
/* Action for selecting a new month/year. */
_selectMonthYear: function(id, select, period) {
var target = $(id),
inst = this._getInst(target[0]);
inst["selected" + (period === "M" ? "Month" : "Year")] =
inst["draw" + (period === "M" ? "Month" : "Year")] =
parseInt(select.options[select.selectedIndex].value,10);
this._notifyChange(inst);
this._adjustDate(target);
},
/* Action for selecting a day. */
_selectDay: function(id, month, year, td) {
var inst,
target = $(id);
if ($(td).hasClass(this._unselectableClass) || this._isDisabledDatepicker(target[0])) {
return;
}
inst = this._getInst(target[0]);
inst.selectedDay = inst.currentDay = $("a", td).html();
inst.selectedMonth = inst.currentMonth = month;
inst.selectedYear = inst.currentYear = year;
this._selectDate(id, this._formatDate(inst,
inst.currentDay, inst.currentMonth, inst.currentYear));
},
/* Erase the input field and hide the date picker. */
_clearDate: function(id) {
var target = $(id);
this._selectDate(target, "");
},
/* Update the input field with the selected date. */
_selectDate: function(id, dateStr) {
var onSelect,
target = $(id),
inst = this._getInst(target[0]);
dateStr = (dateStr != null ? dateStr : this._formatDate(inst));
if (inst.input) {
inst.input.val(dateStr);
}
this._updateAlternate(inst);
onSelect = this._get(inst, "onSelect");
if (onSelect) {
onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]); // trigger custom callback
} else if (inst.input) {
inst.input.trigger("change"); // fire the change event
}
if (inst.inline){
this._updateDatepicker(inst);
} else {
this._hideDatepicker();
this._lastInput = inst.input[0];
if (typeof(inst.input[0]) !== "object") {
inst.input.focus(); // restore focus
}
this._lastInput = null;
}
},
/* Update any alternate field to synchronise with the main field. */
_updateAlternate: function(inst) {
var altFormat, date, dateStr,
altField = this._get(inst, "altField");
if (altField) { // update alternate field too
altFormat = this._get(inst, "altFormat") || this._get(inst, "dateFormat");
date = this._getDate(inst);
dateStr = this.formatDate(altFormat, date, this._getFormatConfig(inst));
$(altField).each(function() { $(this).val(dateStr); });
}
},
/* Set as beforeShowDay function to prevent selection of weekends.
* @param date Date - the date to customise
* @return [boolean, string] - is this date selectable?, what is its CSS class?
*/
noWeekends: function(date) {
var day = date.getDay();
return [(day > 0 && day < 6), ""];
},
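/* Usage sketch (illustrative; the "#dateField" selector is a hypothetical example,
 * not part of the original source): noWeekends is meant to be passed as the
 * beforeShowDay setting so Saturdays and Sundays become unselectable.
 *
 *   $("#dateField").datepicker({ beforeShowDay: $.datepicker.noWeekends });
 */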
/* Set as calculateWeek to determine the week of the year based on the ISO 8601 definition.
* @param date Date - the date to get the week for
* @return number - the number of the week within the year that contains this date
*/
iso8601Week: function(date) {
var time,
checkDate = new Date(date.getTime());
// Find Thursday of this week starting on Monday
checkDate.setDate(checkDate.getDate() + 4 - (checkDate.getDay() || 7));
time = checkDate.getTime();
checkDate.setMonth(0); // Compare with Jan 1
checkDate.setDate(1);
return Math.floor(Math.round((time - checkDate) / 86400000) / 7) + 1;
},
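/* Illustrative examples (a sketch based on the computation above): ISO 8601 weeks
 * start on Monday and week 1 is the week containing the year's first Thursday, so:
 *
 *   $.datepicker.iso8601Week(new Date(2015, 0, 1));   // 1  - Thu 2015-01-01 opens week 1
 *   $.datepicker.iso8601Week(new Date(2016, 0, 1));   // 53 - Fri 2016-01-01 still belongs to 2015's last week
 *
 * It can also be supplied as the calculateWeek setting of a picker instance.
 */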
/* Parse a string value into a date object.
* See formatDate below for the possible formats.
*
* @param format string - the expected format of the date
* @param value string - the date in the above format
* @param settings Object - attributes include:
* shortYearCutoff number - the cutoff year for determining the century (optional)
* dayNamesShort string[7] - abbreviated names of the days from Sunday (optional)
* dayNames string[7] - names of the days from Sunday (optional)
* monthNamesShort string[12] - abbreviated names of the months (optional)
* monthNames string[12] - names of the months (optional)
* @return Date - the extracted date value or null if value is blank
*/
parseDate: function (format, value, settings) {
if (format == null || value == null) {
throw "Invalid arguments";
}
value = (typeof value === "object" ? value.toString() : value + "");
if (value === "") {
return null;
}
var iFormat, dim, extra,
iValue = 0,
shortYearCutoffTemp = (settings ? settings.shortYearCutoff : null) || this._defaults.shortYearCutoff,
shortYearCutoff = (typeof shortYearCutoffTemp !== "string" ? shortYearCutoffTemp :
new Date().getFullYear() % 100 + parseInt(shortYearCutoffTemp, 10)),
dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort,
dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames,
monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort,
monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames,
year = -1,
month = -1,
day = -1,
doy = -1,
literal = false,
date,
// Check whether a format character is doubled
lookAhead = function(match) {
var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match);
if (matches) {
iFormat++;
}
return matches;
},
// Extract a number from the string value
getNumber = function(match) {
var isDoubled = lookAhead(match),
size = (match === "@" ? 14 : (match === "!" ? 20 :
(match === "y" && isDoubled ? 4 : (match === "o" ? 3 : 2)))),
minSize = (match === "y" ? size : 1),
digits = new RegExp("^\\d{" + minSize + "," + size + "}"),
num = value.substring(iValue).match(digits);
if (!num) {
throw "Missing number at position " + iValue;
}
iValue += num[0].length;
return parseInt(num[0], 10);
},
// Extract a name from the string value and convert to an index
getName = function(match, shortNames, longNames) {
var index = -1,
names = $.map(lookAhead(match) ? longNames : shortNames, function (v, k) {
return [ [k, v] ];
}).sort(function (a, b) {
return -(a[1].length - b[1].length);
});
$.each(names, function (i, pair) {
var name = pair[1];
if (value.substr(iValue, name.length).toLowerCase() === name.toLowerCase()) {
index = pair[0];
iValue += name.length;
return false;
}
});
if (index !== -1) {
return index + 1;
} else {
throw "Unknown name at position " + iValue;
}
},
// Confirm that a literal character matches the string value
checkLiteral = function() {
if (value.charAt(iValue) !== format.charAt(iFormat)) {
throw "Unexpected literal at position " + iValue;
}
iValue++;
};
for (iFormat = 0; iFormat < format.length; iFormat++) {
if (literal) {
if (format.charAt(iFormat) === "'" && !lookAhead("'")) {
literal = false;
} else {
checkLiteral();
}
} else {
switch (format.charAt(iFormat)) {
case "d":
day = getNumber("d");
break;
case "D":
getName("D", dayNamesShort, dayNames);
break;
case "o":
doy = getNumber("o");
break;
case "m":
month = getNumber("m");
break;
case "M":
month = getName("M", monthNamesShort, monthNames);
break;
case "y":
year = getNumber("y");
break;
case "@":
date = new Date(getNumber("@"));
year = date.getFullYear();
month = date.getMonth() + 1;
day = date.getDate();
break;
case "!":
date = new Date((getNumber("!") - this._ticksTo1970) / 10000);
year = date.getFullYear();
month = date.getMonth() + 1;
day = date.getDate();
break;
case "'":
if (lookAhead("'")){
checkLiteral();
} else {
literal = true;
}
break;
default:
checkLiteral();
}
}
}
if (iValue < value.length){
extra = value.substr(iValue);
if (!/^\s+/.test(extra)) {
throw "Extra/unparsed characters found in date: " + extra;
}
}
if (year === -1) {
year = new Date().getFullYear();
} else if (year < 100) {
year += new Date().getFullYear() - new Date().getFullYear() % 100 +
(year <= shortYearCutoff ? 0 : -100);
}
if (doy > -1) {
month = 1;
day = doy;
do {
dim = this._getDaysInMonth(year, month - 1);
if (day <= dim) {
break;
}
month++;
day -= dim;
} while (true);
}
date = this._daylightSavingAdjust(new Date(year, month - 1, day));
if (date.getFullYear() !== year || date.getMonth() + 1 !== month || date.getDate() !== day) {
throw "Invalid date"; // E.g. 31/02/00
}
return date;
},
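/* Illustrative calls (a sketch based on the formats documented above, not part of
 * the original source):
 *
 *   $.datepicker.parseDate("yy-mm-dd", "2015-06-09");   // Date for 9 June 2015
 *   $.datepicker.parseDate("d M y", "9 Jun 15");        // short month name, two-digit year
 *   $.datepicker.parseDate("dd/mm/yy", "31/02/2000");   // throws "Invalid date"
 *
 * Null arguments, missing numbers, unknown names and trailing unparsed text all
 * raise the string exceptions thrown above.
 */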
/* Standard date formats. */
ATOM: "yy-mm-dd", // RFC 3339 (ISO 8601)
COOKIE: "D, dd M yy",
ISO_8601: "yy-mm-dd",
RFC_822: "D, d M y",
RFC_850: "DD, dd-M-y",
RFC_1036: "D, d M y",
RFC_1123: "D, d M yy",
RFC_2822: "D, d M yy",
RSS: "D, d M y", // RFC 822
TICKS: "!",
TIMESTAMP: "@",
W3C: "yy-mm-dd", // ISO 8601
_ticksTo1970: (((1970 - 1) * 365 + Math.floor(1970 / 4) - Math.floor(1970 / 100) +
Math.floor(1970 / 400)) * 24 * 60 * 60 * 10000000),
/* Format a date object into a string value.
* The format can be combinations of the following:
* d - day of month (no leading zero)
* dd - day of month (two digit)
* o - day of year (no leading zeros)
* oo - day of year (three digit)
* D - day name short
* DD - day name long
* m - month of year (no leading zero)
* mm - month of year (two digit)
* M - month name short
* MM - month name long
* y - year (two digit)
* yy - year (four digit)
* @ - Unix timestamp (ms since 01/01/1970)
* ! - Windows ticks (100ns since 01/01/0001)
* "..." - literal text
* '' - single quote
*
* @param format string - the desired format of the date
* @param date Date - the date value to format
* @param settings Object - attributes include:
* dayNamesShort string[7] - abbreviated names of the days from Sunday (optional)
* dayNames string[7] - names of the days from Sunday (optional)
* monthNamesShort string[12] - abbreviated names of the months (optional)
* monthNames string[12] - names of the months (optional)
* @return string - the date in the above format
*/
formatDate: function (format, date, settings) {
if (!date) {
return "";
}
var iFormat,
dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort,
dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames,
monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort,
monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames,
// Check whether a format character is doubled
lookAhead = function(match) {
var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match);
if (matches) {
iFormat++;
}
return matches;
},
// Format a number, with leading zero if necessary
formatNumber = function(match, value, len) {
var num = "" + value;
if (lookAhead(match)) {
while (num.length < len) {
num = "0" + num;
}
}
return num;
},
// Format a name, short or long as requested
formatName = function(match, value, shortNames, longNames) {
return (lookAhead(match) ? longNames[value] : shortNames[value]);
},
output = "",
literal = false;
if (date) {
for (iFormat = 0; iFormat < format.length; iFormat++) {
if (literal) {
if (format.charAt(iFormat) === "'" && !lookAhead("'")) {
literal = false;
} else {
output += format.charAt(iFormat);
}
} else {
switch (format.charAt(iFormat)) {
case "d":
output += formatNumber("d", date.getDate(), 2);
break;
case "D":
output += formatName("D", date.getDay(), dayNamesShort, dayNames);
break;
case "o":
output += formatNumber("o",
Math.round((new Date(date.getFullYear(), date.getMonth(), date.getDate()).getTime() - new Date(date.getFullYear(), 0, 0).getTime()) / 86400000), 3);
break;
case "m":
output += formatNumber("m", date.getMonth() + 1, 2);
break;
case "M":
output += formatName("M", date.getMonth(), monthNamesShort, monthNames);
break;
case "y":
output += (lookAhead("y") ? date.getFullYear() :
(date.getYear() % 100 < 10 ? "0" : "") + date.getYear() % 100);
break;
case "@":
output += date.getTime();
break;
case "!":
output += date.getTime() * 10000 + this._ticksTo1970;
break;
case "'":
if (lookAhead("'")) {
output += "'";
} else {
literal = true;
}
break;
default:
output += format.charAt(iFormat);
}
}
}
}
return output;
},
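/* Illustrative calls (sketch; outputs assume the default English day/month names):
 *
 *   var d = new Date(2015, 5, 9);                       // 9 June 2015
 *   $.datepicker.formatDate("yy-mm-dd", d);             // "2015-06-09"
 *   $.datepicker.formatDate("DD, d MM yy", d);          // "Tuesday, 9 June 2015"
 *   $.datepicker.formatDate("'day' o 'of' yy", d);      // "day 160 of 2015"
 */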
/* Extract all possible characters from the date format. */
_possibleChars: function (format) {
var iFormat,
chars = "",
literal = false,
// Check whether a format character is doubled
lookAhead = function(match) {
var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match);
if (matches) {
iFormat++;
}
return matches;
};
for (iFormat = 0; iFormat < format.length; iFormat++) {
if (literal) {
if (format.charAt(iFormat) === "'" && !lookAhead("'")) {
literal = false;
} else {
chars += format.charAt(iFormat);
}
} else {
switch (format.charAt(iFormat)) {
case "d": case "m": case "y": case "@":
chars += "0123456789";
break;
case "D": case "M":
return null; // Accept anything
case "'":
if (lookAhead("'")) {
chars += "'";
} else {
literal = true;
}
break;
default:
chars += format.charAt(iFormat);
}
}
}
return chars;
},
/* Get a setting value, defaulting if necessary. */
_get: function(inst, name) {
return inst.settings[name] !== undefined ?
inst.settings[name] : this._defaults[name];
},
/* Parse existing date and initialise date picker. */
_setDateFromField: function(inst, noDefault) {
if (inst.input.val() === inst.lastVal) {
return;
}
var dateFormat = this._get(inst, "dateFormat"),
dates = inst.lastVal = inst.input ? inst.input.val() : null,
defaultDate = this._getDefaultDate(inst),
date = defaultDate,
settings = this._getFormatConfig(inst);
try {
date = this.parseDate(dateFormat, dates, settings) || defaultDate;
} catch (event) {
dates = (noDefault ? "" : dates);
}
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
inst.currentDay = (dates ? date.getDate() : 0);
inst.currentMonth = (dates ? date.getMonth() : 0);
inst.currentYear = (dates ? date.getFullYear() : 0);
this._adjustInstDate(inst);
},
/* Retrieve the default date shown on opening. */
_getDefaultDate: function(inst) {
return this._restrictMinMax(inst,
this._determineDate(inst, this._get(inst, "defaultDate"), new Date()));
},
/* A date may be specified as an exact value or a relative one. */
_determineDate: function(inst, date, defaultDate) {
var offsetNumeric = function(offset) {
var date = new Date();
date.setDate(date.getDate() + offset);
return date;
},
offsetString = function(offset) {
try {
return $.datepicker.parseDate($.datepicker._get(inst, "dateFormat"),
offset, $.datepicker._getFormatConfig(inst));
}
catch (e) {
// Ignore
}
var date = (offset.toLowerCase().match(/^c/) ?
$.datepicker._getDate(inst) : null) || new Date(),
year = date.getFullYear(),
month = date.getMonth(),
day = date.getDate(),
pattern = /([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,
matches = pattern.exec(offset);
while (matches) {
switch (matches[2] || "d") {
case "d" : case "D" :
day += parseInt(matches[1],10); break;
case "w" : case "W" :
day += parseInt(matches[1],10) * 7; break;
case "m" : case "M" :
month += parseInt(matches[1],10);
day = Math.min(day, $.datepicker._getDaysInMonth(year, month));
break;
case "y": case "Y" :
year += parseInt(matches[1],10);
day = Math.min(day, $.datepicker._getDaysInMonth(year, month));
break;
}
matches = pattern.exec(offset);
}
return new Date(year, month, day);
},
newDate = (date == null || date === "" ? defaultDate : (typeof date === "string" ? offsetString(date) :
(typeof date === "number" ? (isNaN(date) ? defaultDate : offsetNumeric(date)) : new Date(date.getTime()))));
newDate = (newDate && newDate.toString() === "Invalid Date" ? defaultDate : newDate);
if (newDate) {
newDate.setHours(0);
newDate.setMinutes(0);
newDate.setSeconds(0);
newDate.setMilliseconds(0);
}
return this._daylightSavingAdjust(newDate);
},
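/* Note (illustrative sketch with a hypothetical "#dateField" element): this resolver
 * is what allows settings such as defaultDate, minDate and maxDate to be given as
 * relative values - a number of days, or a string combining d/w/m/y offsets,
 * optionally prefixed with "c" to count from the currently selected date:
 *
 *   $("#dateField").datepicker({ minDate: -7, maxDate: "+1m +10d", defaultDate: "c +1w" });
 */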
/* Handle switch to/from daylight saving.
* Hours may be non-zero on daylight saving cut-over:
* > 12 when midnight changeover, but then cannot generate
* midnight datetime, so jump to 1AM, otherwise reset.
* @param date (Date) the date to check
* @return (Date) the corrected date
*/
_daylightSavingAdjust: function(date) {
if (!date) {
return null;
}
date.setHours(date.getHours() > 12 ? date.getHours() + 2 : 0);
return date;
},
/* Set the date(s) directly. */
_setDate: function(inst, date, noChange) {
var clear = !date,
origMonth = inst.selectedMonth,
origYear = inst.selectedYear,
newDate = this._restrictMinMax(inst, this._determineDate(inst, date, new Date()));
inst.selectedDay = inst.currentDay = newDate.getDate();
inst.drawMonth = inst.selectedMonth = inst.currentMonth = newDate.getMonth();
inst.drawYear = inst.selectedYear = inst.currentYear = newDate.getFullYear();
if ((origMonth !== inst.selectedMonth || origYear !== inst.selectedYear) && !noChange) {
this._notifyChange(inst);
}
this._adjustInstDate(inst);
if (inst.input) {
inst.input.val(clear ? "" : this._formatDate(inst));
}
},
/* Retrieve the date(s) directly. */
_getDate: function(inst) {
var startDate = (!inst.currentYear || (inst.input && inst.input.val() === "") ? null :
this._daylightSavingAdjust(new Date(
inst.currentYear, inst.currentMonth, inst.currentDay)));
return startDate;
},
/* Attach the onxxx handlers. These are declared statically so
* they work with static code transformers like Caja.
*/
_attachHandlers: function(inst) {
var stepMonths = this._get(inst, "stepMonths"),
id = "#" + inst.id.replace( /\\\\/g, "\\" );
inst.dpDiv.find("[data-handler]").map(function () {
var handler = {
prev: function () {
$.datepicker._adjustDate(id, -stepMonths, "M");
},
next: function () {
$.datepicker._adjustDate(id, +stepMonths, "M");
},
hide: function () {
$.datepicker._hideDatepicker();
},
today: function () {
$.datepicker._gotoToday(id);
},
selectDay: function () {
$.datepicker._selectDay(id, +this.getAttribute("data-month"), +this.getAttribute("data-year"), this);
return false;
},
selectMonth: function () {
$.datepicker._selectMonthYear(id, this, "M");
return false;
},
selectYear: function () {
$.datepicker._selectMonthYear(id, this, "Y");
return false;
}
};
$(this).bind(this.getAttribute("data-event"), handler[this.getAttribute("data-handler")]);
});
},
/* Generate the HTML for the current state of the date picker. */
_generateHTML: function(inst) {
var maxDraw, prevText, prev, nextText, next, currentText, gotoDate,
controls, buttonPanel, firstDay, showWeek, dayNames, dayNamesMin,
monthNames, monthNamesShort, beforeShowDay, showOtherMonths,
selectOtherMonths, defaultDate, html, dow, row, group, col, selectedDate,
cornerClass, calender, thead, day, daysInMonth, leadDays, curRows, numRows,
printDate, dRow, tbody, daySettings, otherMonth, unselectable,
tempDate = new Date(),
today = this._daylightSavingAdjust(
new Date(tempDate.getFullYear(), tempDate.getMonth(), tempDate.getDate())), // clear time
isRTL = this._get(inst, "isRTL"),
showButtonPanel = this._get(inst, "showButtonPanel"),
hideIfNoPrevNext = this._get(inst, "hideIfNoPrevNext"),
navigationAsDateFormat = this._get(inst, "navigationAsDateFormat"),
numMonths = this._getNumberOfMonths(inst),
showCurrentAtPos = this._get(inst, "showCurrentAtPos"),
stepMonths = this._get(inst, "stepMonths"),
isMultiMonth = (numMonths[0] !== 1 || numMonths[1] !== 1),
currentDate = this._daylightSavingAdjust((!inst.currentDay ? new Date(9999, 9, 9) :
new Date(inst.currentYear, inst.currentMonth, inst.currentDay))),
minDate = this._getMinMaxDate(inst, "min"),
maxDate = this._getMinMaxDate(inst, "max"),
drawMonth = inst.drawMonth - showCurrentAtPos,
drawYear = inst.drawYear;
if (drawMonth < 0) {
drawMonth += 12;
drawYear--;
}
if (maxDate) {
maxDraw = this._daylightSavingAdjust(new Date(maxDate.getFullYear(),
maxDate.getMonth() - (numMonths[0] * numMonths[1]) + 1, maxDate.getDate()));
maxDraw = (minDate && maxDraw < minDate ? minDate : maxDraw);
while (this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1)) > maxDraw) {
drawMonth--;
if (drawMonth < 0) {
drawMonth = 11;
drawYear--;
}
}
}
inst.drawMonth = drawMonth;
inst.drawYear = drawYear;
prevText = this._get(inst, "prevText");
prevText = (!navigationAsDateFormat ? prevText : this.formatDate(prevText,
this._daylightSavingAdjust(new Date(drawYear, drawMonth - stepMonths, 1)),
this._getFormatConfig(inst)));
prev = (this._canAdjustMonth(inst, -1, drawYear, drawMonth) ?
"<a class='ui-datepicker-prev ui-corner-all' data-handler='prev' data-event='click'" +
" title='" + prevText + "'><span class='ui-icon ui-icon-circle-triangle-" + ( isRTL ? "e" : "w") + "'>" + prevText + "</span></a>" :
(hideIfNoPrevNext ? "" : "<a class='ui-datepicker-prev ui-corner-all ui-state-disabled' title='"+ prevText +"'><span class='ui-icon ui-icon-circle-triangle-" + ( isRTL ? "e" : "w") + "'>" + prevText + "</span></a>"));
nextText = this._get(inst, "nextText");
nextText = (!navigationAsDateFormat ? nextText : this.formatDate(nextText,
this._daylightSavingAdjust(new Date(drawYear, drawMonth + stepMonths, 1)),
this._getFormatConfig(inst)));
next = (this._canAdjustMonth(inst, +1, drawYear, drawMonth) ?
"<a class='ui-datepicker-next ui-corner-all' data-handler='next' data-event='click'" +
" title='" + nextText + "'><span class='ui-icon ui-icon-circle-triangle-" + ( isRTL ? "w" : "e") + "'>" + nextText + "</span></a>" :
(hideIfNoPrevNext ? "" : "<a class='ui-datepicker-next ui-corner-all ui-state-disabled' title='"+ nextText + "'><span class='ui-icon ui-icon-circle-triangle-" + ( isRTL ? "w" : "e") + "'>" + nextText + "</span></a>"));
currentText = this._get(inst, "currentText");
gotoDate = (this._get(inst, "gotoCurrent") && inst.currentDay ? currentDate : today);
currentText = (!navigationAsDateFormat ? currentText :
this.formatDate(currentText, gotoDate, this._getFormatConfig(inst)));
controls = (!inst.inline ? "<button type='button' class='ui-datepicker-close ui-state-default ui-priority-primary ui-corner-all' data-handler='hide' data-event='click'>" +
this._get(inst, "closeText") + "</button>" : "");
buttonPanel = (showButtonPanel) ? "<div class='ui-datepicker-buttonpane ui-widget-content'>" + (isRTL ? controls : "") +
(this._isInRange(inst, gotoDate) ? "<button type='button' class='ui-datepicker-current ui-state-default ui-priority-secondary ui-corner-all' data-handler='today' data-event='click'" +
">" + currentText + "</button>" : "") + (isRTL ? "" : controls) + "</div>" : "";
firstDay = parseInt(this._get(inst, "firstDay"),10);
firstDay = (isNaN(firstDay) ? 0 : firstDay);
showWeek = this._get(inst, "showWeek");
dayNames = this._get(inst, "dayNames");
dayNamesMin = this._get(inst, "dayNamesMin");
monthNames = this._get(inst, "monthNames");
monthNamesShort = this._get(inst, "monthNamesShort");
beforeShowDay = this._get(inst, "beforeShowDay");
showOtherMonths = this._get(inst, "showOtherMonths");
selectOtherMonths = this._get(inst, "selectOtherMonths");
defaultDate = this._getDefaultDate(inst);
html = "";
dow;
for (row = 0; row < numMonths[0]; row++) {
group = "";
this.maxRows = 4;
for (col = 0; col < numMonths[1]; col++) {
selectedDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, inst.selectedDay));
cornerClass = " ui-corner-all";
calender = "";
if (isMultiMonth) {
calender += "<div class='ui-datepicker-group";
if (numMonths[1] > 1) {
switch (col) {
case 0: calender += " ui-datepicker-group-first";
cornerClass = " ui-corner-" + (isRTL ? "right" : "left"); break;
case numMonths[1]-1: calender += " ui-datepicker-group-last";
cornerClass = " ui-corner-" + (isRTL ? "left" : "right"); break;
default: calender += " ui-datepicker-group-middle"; cornerClass = ""; break;
}
}
calender += "'>";
}
calender += "<div class='ui-datepicker-header ui-widget-header ui-helper-clearfix" + cornerClass + "'>" +
(/all|left/.test(cornerClass) && row === 0 ? (isRTL ? next : prev) : "") +
(/all|right/.test(cornerClass) && row === 0 ? (isRTL ? prev : next) : "") +
this._generateMonthYearHeader(inst, drawMonth, drawYear, minDate, maxDate,
row > 0 || col > 0, monthNames, monthNamesShort) + // draw month headers
"</div><table class='ui-datepicker-calendar'><thead>" +
"<tr>";
thead = (showWeek ? "<th class='ui-datepicker-week-col'>" + this._get(inst, "weekHeader") + "</th>" : "");
for (dow = 0; dow < 7; dow++) { // days of the week
day = (dow + firstDay) % 7;
thead += "<th scope='col'" + ((dow + firstDay + 6) % 7 >= 5 ? " class='ui-datepicker-week-end'" : "") + ">" +
"<span title='" + dayNames[day] + "'>" + dayNamesMin[day] + "</span></th>";
}
calender += thead + "</tr></thead><tbody>";
daysInMonth = this._getDaysInMonth(drawYear, drawMonth);
if (drawYear === inst.selectedYear && drawMonth === inst.selectedMonth) {
inst.selectedDay = Math.min(inst.selectedDay, daysInMonth);
}
leadDays = (this._getFirstDayOfMonth(drawYear, drawMonth) - firstDay + 7) % 7;
curRows = Math.ceil((leadDays + daysInMonth) / 7); // calculate the number of rows to generate
numRows = (isMultiMonth ? this.maxRows > curRows ? this.maxRows : curRows : curRows); //If multiple months, use the higher number of rows (see #7043)
this.maxRows = numRows;
printDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1 - leadDays));
for (dRow = 0; dRow < numRows; dRow++) { // create date picker rows
calender += "<tr>";
tbody = (!showWeek ? "" : "<td class='ui-datepicker-week-col'>" +
this._get(inst, "calculateWeek")(printDate) + "</td>");
for (dow = 0; dow < 7; dow++) { // create date picker days
daySettings = (beforeShowDay ?
beforeShowDay.apply((inst.input ? inst.input[0] : null), [printDate]) : [true, ""]);
otherMonth = (printDate.getMonth() !== drawMonth);
unselectable = (otherMonth && !selectOtherMonths) || !daySettings[0] ||
(minDate && printDate < minDate) || (maxDate && printDate > maxDate);
tbody += "<td class='" +
((dow + firstDay + 6) % 7 >= 5 ? " ui-datepicker-week-end" : "") + // highlight weekends
(otherMonth ? " ui-datepicker-other-month" : "") + // highlight days from other months
((printDate.getTime() === selectedDate.getTime() && drawMonth === inst.selectedMonth && inst._keyEvent) || // user pressed key
(defaultDate.getTime() === printDate.getTime() && defaultDate.getTime() === selectedDate.getTime()) ?
// or defaultDate is current printedDate and defaultDate is selectedDate
" " + this._dayOverClass : "") + // highlight selected day
(unselectable ? " " + this._unselectableClass + " ui-state-disabled": "") + // highlight unselectable days
(otherMonth && !showOtherMonths ? "" : " " + daySettings[1] + // highlight custom dates
(printDate.getTime() === currentDate.getTime() ? " " + this._currentClass : "") + // highlight selected day
(printDate.getTime() === today.getTime() ? " ui-datepicker-today" : "")) + "'" + // highlight today (if different)
((!otherMonth || showOtherMonths) && daySettings[2] ? " title='" + daySettings[2].replace(/'/g, "&#39;") + "'" : "") + // cell title
(unselectable ? "" : " data-handler='selectDay' data-event='click' data-month='" + printDate.getMonth() + "' data-year='" + printDate.getFullYear() + "'") + ">" + // actions
(otherMonth && !showOtherMonths ? " " : // display for other months
(unselectable ? "<span class='ui-state-default'>" + printDate.getDate() + "</span>" : "<a class='ui-state-default" +
(printDate.getTime() === today.getTime() ? " ui-state-highlight" : "") +
(printDate.getTime() === currentDate.getTime() ? " ui-state-active" : "") + // highlight selected day
(otherMonth ? " ui-priority-secondary" : "") + // distinguish dates from other months
"' href='#'>" + printDate.getDate() + "</a>")) + "</td>"; // display selectable date
printDate.setDate(printDate.getDate() + 1);
printDate = this._daylightSavingAdjust(printDate);
}
calender += tbody + "</tr>";
}
drawMonth++;
if (drawMonth > 11) {
drawMonth = 0;
drawYear++;
}
calender += "</tbody></table>" + (isMultiMonth ? "</div>" +
((numMonths[0] > 0 && col === numMonths[1]-1) ? "<div class='ui-datepicker-row-break'></div>" : "") : "");
group += calender;
}
html += group;
}
html += buttonPanel;
inst._keyEvent = false;
return html;
},
/* Generate the month and year header. */
_generateMonthYearHeader: function(inst, drawMonth, drawYear, minDate, maxDate,
secondary, monthNames, monthNamesShort) {
var inMinYear, inMaxYear, month, years, thisYear, determineYear, year, endYear,
changeMonth = this._get(inst, "changeMonth"),
changeYear = this._get(inst, "changeYear"),
showMonthAfterYear = this._get(inst, "showMonthAfterYear"),
html = "<div class='ui-datepicker-title'>",
monthHtml = "";
// month selection
if (secondary || !changeMonth) {
monthHtml += "<span class='ui-datepicker-month'>" + monthNames[drawMonth] + "</span>";
} else {
inMinYear = (minDate && minDate.getFullYear() === drawYear);
inMaxYear = (maxDate && maxDate.getFullYear() === drawYear);
monthHtml += "<select class='ui-datepicker-month' data-handler='selectMonth' data-event='change'>";
for ( month = 0; month < 12; month++) {
if ((!inMinYear || month >= minDate.getMonth()) && (!inMaxYear || month <= maxDate.getMonth())) {
monthHtml += "<option value='" + month + "'" +
(month === drawMonth ? " selected='selected'" : "") +
">" + monthNamesShort[month] + "</option>";
}
}
monthHtml += "</select>";
}
if (!showMonthAfterYear) {
html += monthHtml + (secondary || !(changeMonth && changeYear) ? " " : "");
}
// year selection
if ( !inst.yearshtml ) {
inst.yearshtml = "";
if (secondary || !changeYear) {
html += "<span class='ui-datepicker-year'>" + drawYear + "</span>";
} else {
// determine range of years to display
years = this._get(inst, "yearRange").split(":");
thisYear = new Date().getFullYear();
determineYear = function(value) {
var year = (value.match(/c[+\-].*/) ? drawYear + parseInt(value.substring(1), 10) :
(value.match(/[+\-].*/) ? thisYear + parseInt(value, 10) :
parseInt(value, 10)));
return (isNaN(year) ? thisYear : year);
};
year = determineYear(years[0]);
endYear = Math.max(year, determineYear(years[1] || ""));
year = (minDate ? Math.max(year, minDate.getFullYear()) : year);
endYear = (maxDate ? Math.min(endYear, maxDate.getFullYear()) : endYear);
inst.yearshtml += "<select class='ui-datepicker-year' data-handler='selectYear' data-event='change'>";
for (; year <= endYear; year++) {
inst.yearshtml += "<option value='" + year + "'" +
(year === drawYear ? " selected='selected'" : "") +
">" + year + "</option>";
}
inst.yearshtml += "</select>";
html += inst.yearshtml;
inst.yearshtml = null;
}
}
html += this._get(inst, "yearSuffix");
if (showMonthAfterYear) {
html += (secondary || !(changeMonth && changeYear) ? " " : "") + monthHtml;
}
html += "</div>"; // Close datepicker_header
return html;
},
/* Adjust one of the date sub-fields. */
_adjustInstDate: function(inst, offset, period) {
var year = inst.drawYear + (period === "Y" ? offset : 0),
month = inst.drawMonth + (period === "M" ? offset : 0),
day = Math.min(inst.selectedDay, this._getDaysInMonth(year, month)) + (period === "D" ? offset : 0),
date = this._restrictMinMax(inst, this._daylightSavingAdjust(new Date(year, month, day)));
inst.selectedDay = date.getDate();
inst.drawMonth = inst.selectedMonth = date.getMonth();
inst.drawYear = inst.selectedYear = date.getFullYear();
if (period === "M" || period === "Y") {
this._notifyChange(inst);
}
},
/* Ensure a date is within any min/max bounds. */
_restrictMinMax: function(inst, date) {
var minDate = this._getMinMaxDate(inst, "min"),
maxDate = this._getMinMaxDate(inst, "max"),
newDate = (minDate && date < minDate ? minDate : date);
return (maxDate && newDate > maxDate ? maxDate : newDate);
},
/* Notify change of month/year. */
_notifyChange: function(inst) {
var onChange = this._get(inst, "onChangeMonthYear");
if (onChange) {
onChange.apply((inst.input ? inst.input[0] : null),
[inst.selectedYear, inst.selectedMonth + 1, inst]);
}
},
/* Determine the number of months to show. */
_getNumberOfMonths: function(inst) {
var numMonths = this._get(inst, "numberOfMonths");
return (numMonths == null ? [1, 1] : (typeof numMonths === "number" ? [1, numMonths] : numMonths));
},
/* Determine the current maximum date - ensure no time components are set. */
_getMinMaxDate: function(inst, minMax) {
return this._determineDate(inst, this._get(inst, minMax + "Date"), null);
},
/* Find the number of days in a given month. */
_getDaysInMonth: function(year, month) {
return 32 - this._daylightSavingAdjust(new Date(year, month, 32)).getDate();
},
/* Find the day of the week of the first of a month. */
_getFirstDayOfMonth: function(year, month) {
return new Date(year, month, 1).getDay();
},
/* Determines if we should allow a "next/prev" month display change. */
_canAdjustMonth: function(inst, offset, curYear, curMonth) {
var numMonths = this._getNumberOfMonths(inst),
date = this._daylightSavingAdjust(new Date(curYear,
curMonth + (offset < 0 ? offset : numMonths[0] * numMonths[1]), 1));
if (offset < 0) {
date.setDate(this._getDaysInMonth(date.getFullYear(), date.getMonth()));
}
return this._isInRange(inst, date);
},
/* Is the given date in the accepted range? */
_isInRange: function(inst, date) {
var yearSplit, currentYear,
minDate = this._getMinMaxDate(inst, "min"),
maxDate = this._getMinMaxDate(inst, "max"),
minYear = null,
maxYear = null,
years = this._get(inst, "yearRange");
if (years){
yearSplit = years.split(":");
currentYear = new Date().getFullYear();
minYear = parseInt(yearSplit[0], 10);
maxYear = parseInt(yearSplit[1], 10);
if ( yearSplit[0].match(/[+\-].*/) ) {
minYear += currentYear;
}
if ( yearSplit[1].match(/[+\-].*/) ) {
maxYear += currentYear;
}
}
return ((!minDate || date.getTime() >= minDate.getTime()) &&
(!maxDate || date.getTime() <= maxDate.getTime()) &&
(!minYear || date.getFullYear() >= minYear) &&
(!maxYear || date.getFullYear() <= maxYear));
},
/* Provide the configuration settings for formatting/parsing. */
_getFormatConfig: function(inst) {
var shortYearCutoff = this._get(inst, "shortYearCutoff");
shortYearCutoff = (typeof shortYearCutoff !== "string" ? shortYearCutoff :
new Date().getFullYear() % 100 + parseInt(shortYearCutoff, 10));
return {shortYearCutoff: shortYearCutoff,
dayNamesShort: this._get(inst, "dayNamesShort"), dayNames: this._get(inst, "dayNames"),
monthNamesShort: this._get(inst, "monthNamesShort"), monthNames: this._get(inst, "monthNames")};
},
/* Format the given date for display. */
_formatDate: function(inst, day, month, year) {
if (!day) {
inst.currentDay = inst.selectedDay;
inst.currentMonth = inst.selectedMonth;
inst.currentYear = inst.selectedYear;
}
var date = (day ? (typeof day === "object" ? day :
this._daylightSavingAdjust(new Date(year, month, day))) :
this._daylightSavingAdjust(new Date(inst.currentYear, inst.currentMonth, inst.currentDay)));
return this.formatDate(this._get(inst, "dateFormat"), date, this._getFormatConfig(inst));
}
});
/*
* Bind hover events for datepicker elements.
* Done via delegate so the binding only occurs once in the lifetime of the parent div.
* Global datepicker_instActive, set by _updateDatepicker allows the handlers to find their way back to the active picker.
*/
function datepicker_bindHover(dpDiv) {
var selector = "button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";
return dpDiv.delegate(selector, "mouseout", function() {
$(this).removeClass("ui-state-hover");
if (this.className.indexOf("ui-datepicker-prev") !== -1) {
$(this).removeClass("ui-datepicker-prev-hover");
}
if (this.className.indexOf("ui-datepicker-next") !== -1) {
$(this).removeClass("ui-datepicker-next-hover");
}
})
.delegate( selector, "mouseover", datepicker_handleMouseover );
}
function datepicker_handleMouseover() {
if (!$.datepicker._isDisabledDatepicker( datepicker_instActive.inline? datepicker_instActive.dpDiv.parent()[0] : datepicker_instActive.input[0])) {
$(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover");
$(this).addClass("ui-state-hover");
if (this.className.indexOf("ui-datepicker-prev") !== -1) {
$(this).addClass("ui-datepicker-prev-hover");
}
if (this.className.indexOf("ui-datepicker-next") !== -1) {
$(this).addClass("ui-datepicker-next-hover");
}
}
}
/* jQuery extend now ignores nulls! */
function datepicker_extendRemove(target, props) {
$.extend(target, props);
for (var name in props) {
if (props[name] == null) {
target[name] = props[name];
}
}
return target;
}
/* Invoke the datepicker functionality.
@param options string - a command, optionally followed by additional parameters or
Object - settings for attaching new datepicker functionality
@return jQuery object */
$.fn.datepicker = function(options){
/* Verify an empty collection wasn't passed - Fixes #6976 */
if ( !this.length ) {
return this;
}
/* Initialise the date picker. */
if (!$.datepicker.initialized) {
$(document).mousedown($.datepicker._checkExternalClick);
$.datepicker.initialized = true;
}
/* Append datepicker main container to body if it does not exist. */
if ($("#"+$.datepicker._mainDivId).length === 0) {
$("body").append($.datepicker.dpDiv);
}
var otherArgs = Array.prototype.slice.call(arguments, 1);
if (typeof options === "string" && (options === "isDisabled" || options === "getDate" || options === "widget")) {
return $.datepicker["_" + options + "Datepicker"].
apply($.datepicker, [this[0]].concat(otherArgs));
}
if (options === "option" && arguments.length === 2 && typeof arguments[1] === "string") {
return $.datepicker["_" + options + "Datepicker"].
apply($.datepicker, [this[0]].concat(otherArgs));
}
return this.each(function() {
typeof options === "string" ?
$.datepicker["_" + options + "Datepicker"].
apply($.datepicker, [this].concat(otherArgs)) :
$.datepicker._attachDatepicker(this, options);
});
};
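/* Usage sketch (illustrative; "#dateField" is a hypothetical input element):
 *
 *   $("#dateField").datepicker({ dateFormat: "yy-mm-dd", changeYear: true });  // attach with settings
 *   $("#dateField").datepicker("getDate");                                     // command form, returns a Date
 *   $("#dateField").datepicker("option", "minDate", 0);                        // update a setting later
 */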
$.datepicker = new Datepicker(); // singleton instance
$.datepicker.initialized = false;
$.datepicker.uuid = new Date().getTime();
$.datepicker.version = "1.11.4";
var datepicker = $.datepicker;
}));<|fim▁end|> | onSelect: null, // Define a callback function when a date is selected
onChangeMonthYear: null, // Define a callback function when the month or year is changed
onClose: null, // Define a callback function when the datepicker is closed
numberOfMonths: 1, // Number of months to show at a time |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>/**
* Copyright 2015 CANAL+ Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.<|fim▁hole|> *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* /!\ This file is feature-switchable.
* It always should be imported through the `features` object.
*/
import parseTTMLStringToVTT from "./parse_ttml_to_vtt";
export default parseTTMLStringToVTT;<|fim▁end|> | * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 |